pike.git / src / code / amd64.c

version » Context lines:

pike.git/src/code/amd64.c:213:    }    }    else    {    add_to_program(0x83); /* AND REG,imm8 */    modrm( 3, 4, reg );    ib( imm32 );    }   }    + static void and_reg32_imm( enum amd64_reg reg, int imm32 ) + { +  rex( 0, 0, 0, reg ); +  +  if( imm32 < -0x80 || imm32 > 0x7f ) +  { +  if( reg == REG_RAX ) +  { +  opcode( 0x25 ); /* AND rax,imm32 */ +  id( imm32 ); +  } +  else +  { +  opcode( 0x81 ); /* AND REG,imm32 */ +  modrm( 3,4, reg); +  id( imm32 ); +  } +  } +  else +  { +  add_to_program(0x83); /* AND REG,imm8 */ +  modrm( 3, 4, reg ); +  ib( imm32 ); +  } + } +    static void mov_mem32_reg( enum amd64_reg from_reg, ptrdiff_t offset, enum amd64_reg to_reg )   {    rex( 0, to_reg, 0, from_reg );    low_mov_mem_reg( from_reg, offset, to_reg );   }      static void mov_mem16_reg( enum amd64_reg from_reg, ptrdiff_t offset, enum amd64_reg to_reg )   {   #if 0    mov_mem32_reg( from_reg, offset, to_reg );
/* Hunk @ amd64.c:265.  NEW shift-left-by-register emitters (0xD3 /4 = SHL r/m,CL):
 * - shl_reg_reg(reg, sreg):   64-bit SHL reg,CL (rex W=1); only sreg==REG_RCX is
 *   encodable, anything else hits Pike_fatal().
 * - shl_reg32_reg(reg, sreg): same but 32-bit operand size (rex W=0).
 * - shl_reg_mem(reg, mem, offset): loads the shift count byte from
 *   [mem+offset] into RCX first, then shifts -- so it clobbers RCX, and
 *   reg==REG_RCX is rejected with Pike_fatal().
 * shr_reg_imm (an arithmetic right shift per its SAR comment, 0xD1/0xC1 /7)
 * is cut off at the end of this hunk. */
pike.git/src/code/amd64.c:265:    modrm( 3, 4, from_reg );    }    else    {    opcode( 0xc1 );    modrm( 3, 4, from_reg );    ib( shift );    }   }    + static void shl_reg_reg( enum amd64_reg reg, enum amd64_reg sreg) + { +  if( sreg != REG_RCX ) +  Pike_fatal("Not supported\n"); +  +  rex( 1, 0, 0, reg ); +  opcode( 0xd3 ); +  modrm( 3, 4, reg ); + } +  + static void shl_reg32_reg( enum amd64_reg reg, enum amd64_reg sreg) + { +  if( sreg != REG_RCX ) +  Pike_fatal("Not supported\n"); +  rex( 0, 0, 0, reg ); +  opcode( 0xd3 ); +  modrm( 3, 4, reg ); + } +  + static void shl_reg_mem( enum amd64_reg reg, enum amd64_reg mem, int offset) + { +  if( reg == REG_RCX ) +  Pike_fatal("Not supported\n"); +  mov_mem8_reg( mem, offset, REG_RCX ); +  shl_reg_reg( reg, REG_RCX ); + } +    static void shr_reg_imm( enum amd64_reg from_reg, int shift )   {    rex( 1, from_reg, 0, 0 );    if( shift == 1 )    {    opcode( 0xd1 ); /* SAR */    modrm( 3, 7, from_reg );    }    else    {
/* Hunk @ amd64.c:302.  mov_imm_reg(imm, reg): three encodings by immediate range:
 * - outside signed 32 bits: REX.W + 0xB8+r with the full imm64 emitted as two
 *   32-bit halves;
 * - NEW branch, imm > 0 (and <= 0x7fffffff): 0xC7 /0 with W=0, i.e. a plain
 *   32-bit move -- one byte shorter since it drops the REX.W prefix.
 *   NOTE(review): the inherited "SE" in its comment is misleading here; the
 *   32-bit form is not sign-extended, which is presumably exactly why this
 *   branch is gated on imm > 0 -- confirm against the Intel/AMD manuals;
 * - otherwise (imm <= 0, fits in 32 bits): REX.W + 0xC7 /0, imm32
 *   sign-extended to 64 bits. */
pike.git/src/code/amd64.c:302:      static void mov_imm_reg( long imm, enum amd64_reg reg )   {    if( (imm > 0x7fffffffLL) || (imm < -0x80000000LL) )    {    rex(1,0,0,reg);    opcode(0xb8 | (reg&0x7)); /* mov imm64 -> reg64 */    id( (imm & 0xffffffffLL) );    id( ((imm >> 32)&0xffffffffLL) );    } +  else if( imm > 0 ) +  { +  rex(0,0,0,reg); +  opcode( 0xc7 ); /* mov imm32 -> reg/m 32, SE*/ +  modrm( 3,0,reg ); +  id( (int)imm ); +  }    else    {    rex(1,0,0,reg);    opcode( 0xc7 ); /* mov imm32 -> reg/m 64, SE*/    modrm( 3,0,reg );    id( (int)imm );    }   }      static void low_mov_reg_mem(enum amd64_reg from_reg, enum amd64_reg to_reg, ptrdiff_t offset )
/* Hunk @ amd64.c:772.  One-line rel8 conditional-jump helpers (jl/jle/jo/jno/
 * jc/jnc/jz/jnz), each forwarding its Jcc opcode byte to jump_rel8().
 * CHANGE: the LABELS() macro -- which declares and resets the per-opcode local
 * jump labels -- gains a fifth label, label_E, initialized like the others
 * (addr = -1, n_label_uses = 0), and a matching LABEL_E binder macro is added.
 * amd64_ins_entry() is cut off at the end of this hunk. */
pike.git/src/code/amd64.c:772:   static void jl( struct label *l ) { return jump_rel8( l, 0x7c ); }   static void jle( struct label *l ) { return jump_rel8( l, 0x7e ); }   static void jo( struct label *l ) { return jump_rel8( l, 0x70 ); }   static void jno( struct label *l ) { return jump_rel8( l, 0x71 ); }   static void jc( struct label *l ) { return jump_rel8( l, 0x72 ); }   static void jnc( struct label *l ) { return jump_rel8( l, 0x73 ); }   static void jz( struct label *l ) { return jump_rel8( l, 0x74 ); }   static void jnz( struct label *l ) { return jump_rel8( l, 0x75 ); }       - #define LABELS() struct label label_A, label_B, label_C, label_D;label_A.addr = -1;label_A.n_label_uses = 0;label_B.addr = -1;label_B.n_label_uses = 0;label_C.addr = -1;label_C.n_label_uses = 0;label_D.addr = -1;label_D.n_label_uses = 0; + #define LABELS() struct label label_A, label_B, label_C, label_D, label_E;label_A.addr = -1;label_A.n_label_uses = 0;label_B.addr = -1;label_B.n_label_uses = 0;label_C.addr = -1;label_C.n_label_uses = 0;label_D.addr = -1;label_D.n_label_uses = 0;label_E.addr=-1;label_E.n_label_uses=0;   #define LABEL_A label(&label_A)   #define LABEL_B label(&label_B)   #define LABEL_C label(&label_C)   #define LABEL_D label(&label_D) -  + #define LABEL_E label(&label_E)      /* Machine code entry prologue.    *    * On entry:    * RDI: Pike_interpreter (ARG1_REG)    *    * During interpreting:    * R15: Pike_interpreter    */   void amd64_ins_entry(void)
/* Hunk @ amd64.c:898.
 * amd64_add_mark_sp(num): bumps the cached mark-sp register by num svalue
 * pointers and flushes dirty registers (see inline FIXME about LABELs).
 * CHANGE: amd64_push_svaluep is generalized into amd64_push_svaluep_to(reg,
 * spoff), which copies the svalue at *reg to sp[spoff] instead of only sp[0]:
 * type word and u.refs are copied via RAX/RCX, the type is masked with 0x1f
 * and compared against MAX_REF_TYPE, and for ref-counted types the refcount
 * is incremented in place.  RAX/RCX are clobbered, and reg==REG_RAX is now an
 * explicit Pike_fatal() instead of just a comment.  The old entry point
 * amd64_push_svaluep(reg) remains as a wrapper: push_svaluep_to(reg, 0) plus
 * amd64_add_sp(1), so existing callers are unaffected.
 * NOTE(review): the refcount add uses OFFSETOF(pike_string, refs) for all
 * ref types -- presumably all ref-counted things share that refs offset;
 * confirm against the svalue/pike_string layout.
 * amd64_push_int(value, subtype): stores (subtype<<16)+PIKE_T_INT and the
 * integer payload at sp[0], then increments sp. */
pike.git/src/code/amd64.c:898:      static void amd64_add_mark_sp( int num )   {    amd64_load_mark_sp_reg();    add_reg_imm( mark_sp_reg, sizeof(struct svalue*)*num);    dirty_regs |= 1 << PIKE_MARK_SP_REG;    flush_dirty_regs(); /* FIXME: Need to change LABEL handling to remove this */   }      /* Note: Uses RAX and RCX internally. reg MUST not be REG_RAX. */ - static void amd64_push_svaluep(int reg) + static void amd64_push_svaluep_to(int reg, int spoff)   {    LABELS(); -  +  if( reg == REG_RAX ) +  Pike_fatal("Using RAX in push_svaluep not supported\n" );    amd64_load_sp_reg();    mov_mem_reg(reg, OFFSETOF(svalue, type), REG_RAX);    mov_mem_reg(reg, OFFSETOF(svalue, u.refs), REG_RCX); -  mov_reg_mem(REG_RAX, sp_reg, OFFSETOF(svalue, type)); +  mov_reg_mem(REG_RAX, sp_reg, spoff*sizeof(struct svalue)+OFFSETOF(svalue, type));    and_reg_imm(REG_RAX, 0x1f); -  mov_reg_mem(REG_RCX, sp_reg, OFFSETOF(svalue, u.refs)); +  mov_reg_mem(REG_RCX, sp_reg, spoff*sizeof(struct svalue)+OFFSETOF(svalue, u.refs));    cmp_reg32_imm(REG_RAX, MAX_REF_TYPE);    jg(&label_A); -  add_imm_mem( 1, REG_RCX,OFFSETOF(pike_string, refs)); +  add_imm_mem( 1, REG_RCX, OFFSETOF(pike_string, refs));    LABEL_A; -  + } +  + static void amd64_push_svaluep(int reg) + { +  amd64_push_svaluep_to( reg, 0 );    amd64_add_sp( 1 );   }      static void amd64_push_int(INT64 value, int subtype)   {    amd64_load_sp_reg();    mov_imm_mem((subtype<<16) + PIKE_T_INT, sp_reg, OFFSETOF(svalue, type));    mov_imm_mem(value, sp_reg, OFFSETOF(svalue, u.integer));    amd64_add_sp( 1 );   }
/* Hunk @ amd64.c:947 (tail of amd64_mark_sp and head of amd64_free_svalue are
 * cut off by the hunk boundaries).
 * CHANGE: mov_sval_type(src, dst) -- loads the svalue type byte and masks it
 * to the low 5 bits -- now uses the new and_reg32_imm() instead of the 64-bit
 * and_reg_imm(); the masked value fits in 32 bits, so this just drops the
 * REX.W prefix for a shorter encoding.
 * amd64_call_c_function(addr): flushes dirty cached registers, then emits the
 * call. */
pike.git/src/code/amd64.c:947:    mov_reg_mem(REG_RAX, mark_sp_reg, 0);    } else {    mov_reg_mem(sp_reg, mark_sp_reg, 0);    }    amd64_add_mark_sp( 1 );   }      static void mov_sval_type(enum amd64_reg src, enum amd64_reg dst )   {    mov_mem8_reg( src, OFFSETOF(svalue,type), dst); -  and_reg_imm( dst, 0x1f ); +  and_reg32_imm( dst, 0x1f );   }         static void amd64_call_c_function(void *addr)   {    flush_dirty_regs();    call_imm(addr);   }      static void amd64_free_svalue(enum amd64_reg src, int guaranteed_ref )
/* Hunk @ amd64.c:1181.
 * amd64_call_c_opcode(addr, flags): sync_registers(flags) then call.
 * CHANGE: a bare "return;" is inserted at the top of the PIKE_DEBUG-only
 * ins_debug_instr_prologue(), making everything after it unreachable --
 * i.e. the debug instrumentation is disabled wholesale.
 * NOTE(review): looks like a temporary debugging toggle left in; it also
 * leaves a declaration after a statement (C99+ only) and dead code that
 * compilers will warn about.  Confirm it is intentional, and if so guard it
 * with a named macro or comment instead of a silent early return.
 * (Function is cut off at the end of this hunk.) */
pike.git/src/code/amd64.c:1181:   static void amd64_call_c_opcode(void *addr, int flags)   {    sync_registers(flags);    call_imm( addr );   }         #ifdef PIKE_DEBUG   static void ins_debug_instr_prologue (PIKE_INSTR_T instr, INT32 arg1, INT32 arg2)   { +  return;    int flags = instrs[instr].flags;       maybe_update_pc();       if (flags & I_HASARG2)    mov_imm_reg(arg2, ARG3_REG);    if (flags & I_HASARG)    mov_imm_reg(arg1, ARG2_REG);    mov_imm_reg(instr, ARG1_REG);   
/* Hunk @ amd64.c:1278 (epilogue pops of a preceding function, and the opening
 * of ins_f_byte, are cut off by the hunk boundaries).
 * NEW: amd64_align() -- pads the code stream with single-byte NOPs (0x90)
 * until PIKE_PC is 8-byte aligned.  This replaces the identical ALIGN()
 * helper deleted further down (hunk @ 1557).
 * NOTE(review): prefer "amd64_align(void)" -- empty parens declare an
 * unprototyped function in C. */
pike.git/src/code/amd64.c:1278:    pop(REG_RBX);    pop(REG_R12);    pop(REG_R13);    pop(REG_R14);    pop(REG_R15);    pop(REG_RBP);    ret();    }   }    + static void amd64_align() + { +  while( PIKE_PC & 7 ) +  ib( 0x90 ); + } +    void ins_f_byte(unsigned int b)   {    int flags;    void *addr;    INT32 rel_addr = 0;    LABELS();       b-=F_OFFSET;   #ifdef PIKE_DEBUG    if(b>255)
/* Hunk @ amd64.c:1303 -- interior of ins_f_byte()'s switch (function start and
 * end are outside this hunk).  Adds machine-code fast paths for four opcodes,
 * each inlining only the T_SVALUE_PTR lvalue case and falling back to the
 * generic C opcode (amd64_call_c_opcode(addr, flags)) otherwise:
 * - F_LTOSVAL2_AND_FREE: makes room by copying sp[-1] up one slot, pushes
 *   *lval to sp[-2] via amd64_push_svaluep_to, then computes 1<<type and masks
 *   with BIT_ARRAY|BIT_MULTISET|BIT_MAPPING|BIT_STRING to decide whether to
 *   free *lval and overwrite it with integer 0.
 *   NOTE(review): this bit-test uses and_reg_imm while the analogous code in
 *   amd64_ins_f_jump uses the new 32-bit and_reg32_imm -- presumably the
 *   32-bit form would do here too; confirm and unify.
 *   It also relies on push_svaluep leaving the type in RAX (see comment).
 * - F_LTOSVAL: pushes *lval inline for SVALUE_PTR, else calls
 *   lvalue_to_svalue_no_free; block comment sketches possible future
 *   object/array fast paths.
 * - F_ASSIGN: frees the old *lval, stores sp[-1] into it, copies the assigned
 *   value down to sp[-3] via amd64_push_svaluep_to(.., -3), pops 2.
 * - F_ASSIGN_AND_POP: same but without keeping the value; pops 3.
 *   Both note that for SVALUE_PTR the lvalue itself needs no freeing, and
 *   both use amd64_align() after the fallback jmp.
 * NOTE(review): F_ASSIGN_AND_POP ends with a duplicated "return; return;" --
 * the second return is unreachable dead code; drop it.
 * An F_ESCAPE_CATCH case label is also added inside the existing #if 0 block. */
pike.git/src/code/amd64.c:1303:       addr=instrs[b].address;    switch(b + F_OFFSET)    {    case F_DUP:    amd64_load_sp_reg();    ins_debug_instr_prologue(b, 0, 0);    add_reg_imm_reg(sp_reg, -sizeof(struct svalue), REG_R10 );    amd64_push_svaluep( REG_R10 );    return; +    #if 0 -  +  case F_ESCAPE_CATCH: +     case F_EXIT_CATCH:    ins_f_byte( F_ESCAPE_CATCH );    amd64_load_sp_reg();    amd64_push_int( 0, 1 );    return;   #endif -  +  /* -3 -2 -1 0 +  * lval[0], lval[1], <RANDOM>, **SP** +  * -> +  * -4 -3 -2 -1 0 +  * lval[0], lval[1], *lval, <RANDOM>, *SP** +  * +  * This will also free *lval iff it has type +  * array,multiset,mapping or string. +  */ +  case F_LTOSVAL2_AND_FREE: +  { +  amd64_load_sp_reg(); +  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), REG_RBX ); +  cmp_reg_imm( REG_RBX, T_SVALUE_PTR ); +  jne( &label_A ); +  +  /* inline version for SVALUE_PTR. */ +  +  /* First, make room for the result, move top and inc sp */ +  mov_mem_reg( sp_reg, -sizeof(struct svalue), REG_RAX ); +  mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, REG_RCX ); +  mov_reg_mem( REG_RAX, sp_reg, 0); +  mov_reg_mem( REG_RCX, sp_reg, 8); +  amd64_add_sp( 1 ); +  +  mov_mem_reg( sp_reg, +  -4*sizeof(struct svalue)+OFFSETOF(svalue,u.lval), +  REG_RBX ); +  amd64_push_svaluep_to(REG_RBX, -2); +  +  /* Result is now in sp[-2], lval in -4. */ +  /* push svaluep leaves type in RAX */ +  mov_reg_reg( REG_RAX, REG_RCX ); +  /* mov_mem8_reg( sp_reg, -2*sizeof(struct svalue), REG_RCX ); */ +  mov_imm_reg( 1, REG_RAX ); +  shl_reg32_reg( REG_RAX, REG_RCX ); +  and_reg_imm( REG_RAX, (BIT_ARRAY|BIT_MULTISET|BIT_MAPPING|BIT_STRING)); +  jz( &label_B ); /* Do nothing.. */ +  +  /* Free the old value. */ +  amd64_free_svalue( REG_RBX, 0 ); +  /* assign 0 */ +  mov_imm_mem( PIKE_T_INT, REG_RBX, 0 ); +  mov_imm_mem( 0, REG_RBX, 8 ); +  jmp( &label_B ); /* All done */ +  +  LABEL_A; +  /* So, not a svalueptr. 
Use C-version */ +  amd64_call_c_opcode( addr, flags ); +  amd64_load_sp_reg(); +  LABEL_B; +  } +  return; +  +  case F_LTOSVAL: +  { +  amd64_load_sp_reg(); +  mov_mem8_reg( sp_reg, -sizeof(struct svalue)*2, REG_RAX ); +  /* lval type in RAX. */ +  /* +  if( rax == T_SVALUE_PTR ) +  push( *l->u.lval ) +  +  possibly: +  if( rax == T_OBJECT ) +  object_index_no_free( to, lval->u.object, lval->subtype, &lval[1]) +  +  if( rax == T_ARRAY && lval[1].type == PIKE_T_INT ) +  push( l->u.array->item[&lval[1].u.integer] ) +  */ +  cmp_reg_imm( REG_RAX, T_SVALUE_PTR ); +  je( &label_A ); +  /* So, not a svalueptr */ +  mov_reg_reg( sp_reg, ARG1_REG ); +  add_reg_imm_reg( sp_reg, -2*sizeof(struct svalue), ARG2_REG ); +  amd64_call_c_function(lvalue_to_svalue_no_free); +  amd64_add_sp(1); +  jmp(&label_B); +  LABEL_A; +  mov_mem_reg( sp_reg, -sizeof(struct svalue)*2+OFFSETOF(svalue,u.lval), +  REG_RCX ); +  amd64_push_svaluep(REG_RCX); +  LABEL_B; +  } +  return; +  case F_ASSIGN: +  { +  amd64_load_sp_reg(); +  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), REG_RAX ); +  cmp_reg_imm( REG_RAX, T_SVALUE_PTR ); +  +  je( &label_A ); +  /* So, not a svalueptr. Use C-version for simplicity */ +  amd64_call_c_opcode( addr, flags ); +  amd64_load_sp_reg(); +  jmp( &label_B ); +  amd64_align(); +  +  LABEL_A; +  mov_mem_reg( sp_reg, -3*sizeof(struct svalue)+8, REG_RBX ); +  +  /* Free old value. */ +  amd64_free_svalue( REG_RBX, 0 ); +  +  /* assign new value */ +  /* also move assigned value -> sp[-3] */ +  mov_mem_reg( sp_reg, -sizeof(struct svalue), REG_RAX ); +  mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, REG_RCX ); +  mov_reg_mem( REG_RAX, REG_RBX, 0 ); +  mov_reg_mem( REG_RCX, REG_RBX, 8 ); +  add_reg_imm_reg( sp_reg, -sizeof(struct svalue), REG_RCX ); +  amd64_push_svaluep_to( REG_RCX, -3 ); +  /* +  Note: For SVALUEPTR we know we do not have to free +  the lvalue. 
+  */ +  amd64_add_sp( -2 ); +  LABEL_B; +  } +  return; +  case F_ASSIGN_AND_POP: +  { +  amd64_load_sp_reg(); +  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), REG_RAX ); +  cmp_reg_imm( REG_RAX, T_SVALUE_PTR ); +  +  je( &label_A ); +  /* So, not a svalueptr. Use C-version for simplicity */ +  amd64_call_c_opcode( addr, flags ); +  amd64_load_sp_reg(); +  jmp( &label_B ); +  amd64_align(); +  +  LABEL_A; +  mov_mem_reg( sp_reg, -3*sizeof(struct svalue)+8, REG_RBX ); +  +  /* Free old value. */ +  amd64_free_svalue( REG_RBX, 0 ); +  +  /* assign new value */ +  mov_mem_reg( sp_reg, -sizeof(struct svalue), REG_RAX ); +  mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, REG_RCX ); +  mov_reg_mem( REG_RAX, REG_RBX, 0 ); +  mov_reg_mem( REG_RCX, REG_RBX, 8 ); +  /* +  Note: For SVALUEPTR we know we do not have to free +  the lvalue. +  */ +  amd64_add_sp( -3 ); +  LABEL_B; +  } +  return; +  return;    case F_ADD_INTS:    {    amd64_load_sp_reg();    ins_debug_instr_prologue(b, 0, 0);    mov_mem8_reg( sp_reg, -sizeof(struct svalue)*2, REG_RAX );    shl_reg_imm( REG_RAX, 8 );    mov_mem8_reg( sp_reg,-sizeof(struct svalue), REG_RBX );    /* and_reg_imm( REG_RBX, 0x1f );*/    add_reg_reg( REG_RAX, REG_RBX );    /* and_reg_imm( REG_RAX, 0x1f1f ); */
/* Hunk @ amd64.c:1557 -- end of ins_f_byte() and opening of
 * amd64_ins_f_jump() (both cut by the hunk boundaries).
 * CHANGE: the old file-local ALIGN() helper is deleted here; it was identical
 * to (and is replaced by) amd64_align(), which was added earlier in the file
 * before ins_f_byte so both functions can use it. */
pike.git/src/code/amd64.c:1557:    }    if (flags & I_JUMP) {    jmp_reg(REG_RAX);       if (b + F_OFFSET == F_CATCH) {    upd_pointer(rel_addr - 4, PIKE_PC - rel_addr);    }    }   }    - static void ALIGN() - { -  while( PIKE_PC & 7 ) -  ib( 0x90 ); - } -  +    int amd64_ins_f_jump(unsigned int op, int backward_jump)   {    int flags;    void *addr;    int off = op - F_OFFSET;    int ret = -1;    LABELS();      #ifdef PIKE_DEBUG    if(off>255)
/* Hunk @ amd64.c:1604 -- interior of amd64_ins_f_jump(): reworked truth test
 * for F_BRANCH_WHEN_ZERO / F_BRANCH_WHEN_NON_ZERO.  Previously only the
 * PIKE_T_INT fast path was inlined and everything else went through
 * svalue_is_true().  Now the type byte is turned into a bitmask
 * (1 << type via shl_reg32_reg) and tested with the new 32-bit
 * and_reg32_imm against BIT_FUNCTION|BIT_OBJECT:
 * - function/object: still call svalue_is_true(), then pop and free;
 * - any other non-int type (per the comment: string, array, mapping, float):
 *   unconditionally true -- just pop, free via amd64_free_svalue_type (the
 *   type is already in RCX), and load 1 into RBX;
 * - int: load u.integer into RBX directly.
 * RBX then carries the truth value to the shared branch decision at LABEL_B. */
pike.git/src/code/amd64.c:1604:    test_reg(REG_RAX);    if( op == F_QUICK_BRANCH_WHEN_ZERO )    return jz_imm_rel32(0);    return jnz_imm_rel32(0);       case F_BRANCH_WHEN_ZERO:    case F_BRANCH_WHEN_NON_ZERO:    START_JUMP();    amd64_load_sp_reg();    mov_mem8_reg( sp_reg, -sizeof(struct svalue), REG_RCX ); -  cmp_reg32_imm( REG_RCX, PIKE_T_INT ); -  je( &label_C ); +  cmp_reg32_imm( REG_RCX, PIKE_T_INT ); je( &label_C ); +  mov_imm_reg( 1, REG_RAX ); +  shl_reg32_reg( REG_RAX, REG_RCX ); +  and_reg32_imm( REG_RAX, BIT_FUNCTION|BIT_OBJECT ); +  jnz( &label_A ); +  +  /* string, array, mapping or float. Always true */ +  amd64_add_sp(-1); +  amd64_free_svalue_type( sp_reg, REG_RCX, 0 ); +  mov_imm_reg( 1, REG_RBX ); +  jmp( &label_B ); +     LABEL_A; -  /* not an integer. Use svalue_is_true. */ +  /* function or object. Use svalue_is_true. */    add_reg_imm_reg(sp_reg, -sizeof(struct svalue), ARG1_REG );    amd64_call_c_function(svalue_is_true);    mov_reg_reg( REG_RAX, REG_RBX ); -  amd64_add_sp( -1 ); /* Pop the stack. */ -  amd64_free_svalue( sp_reg, 0 ); +  amd64_add_sp( -1 ); +  amd64_free_svalue( sp_reg, 0 ); /* Pop the stack. */    jmp( &label_B );       LABEL_C;    /* integer */    mov_mem_reg( sp_reg,    -sizeof(struct svalue)+OFFSETOF(svalue,u.integer),    REG_RBX );    amd64_add_sp(-1);       LABEL_B; /* Branch or not? */
/* Hunk @ amd64.c:2152 -- interior of amd64_ins_f_jump(): the same bitmask
 * truth test applied to F_BRANCH_IF_LOCAL (a local variable pointed to by
 * ARG1_REG).  The previously #if 0'd chain of per-type cmp/je checks for
 * PIKE_T_OBJECT / PIKE_T_FUNCTION is replaced (and enabled) by the compact
 * form: 1 << type, masked with BIT_FUNCTION|BIT_OBJECT via and_reg32_imm.
 * Function/object go through svalue_is_true(); every other non-int type
 * yields constant true (RAX = 1); int loads u.integer into RAX.  The result
 * in RAX reaches the shared test_reg at LABEL_B.  Unlike the
 * F_BRANCH_WHEN_ZERO path, nothing is popped or freed here -- the local is
 * read in place. */
pike.git/src/code/amd64.c:2152:    else if( type == PIKE_T_OBJECT || type == PIKE_T_FUNCTION )    call svalue_is_true(&local)    else    1 -> RAX       The tests are ordered assuming integers are most commonly    checked. That is not nessasarily true.    */    mov_sval_type( ARG1_REG, REG_RCX );    cmp_reg32_imm( REG_RCX, PIKE_T_INT ); je( &label_C ); - #if 0 -  cmp_reg32_imm( REG_RCX, PIKE_T_OBJECT ); je( &label_A ); -  cmp_reg32_imm( REG_RCX, PIKE_T_FUNCTION ); je( &label_A ); +  mov_imm_reg( 1, REG_RAX ); +  shl_reg32_reg( REG_RAX, REG_RCX ); +  and_reg32_imm( REG_RAX, BIT_FUNCTION|BIT_OBJECT ); +  jnz( &label_A );    /* Not object, int or function. Always true. */ -  +     mov_imm_reg( 1, REG_RAX );    jmp( &label_B ); - #endif +  LABEL_A;    amd64_call_c_function(svalue_is_true);    jmp( &label_B );       LABEL_C;    mov_mem_reg( ARG1_REG, OFFSETOF(svalue, u.integer ), REG_RAX );    /* integer. */       LABEL_B;    test_reg( REG_RAX );    if( op == F_BRANCH_IF_LOCAL )