pike.git / src / code / amd64.c


pike.git/src/code/amd64.c:773:
 void amd64_init_interpreter_state(void)
 {
   instrs[F_CATCH - F_OFFSET].address = inter_return_opcode_F_CATCH;
 }
 
 void ins_f_byte(unsigned int b)
 {
   int flags;
   void *addr;
+  INT32 rel_addr = 0;
   b-=F_OFFSET;
 #ifdef PIKE_DEBUG
   if(b>255)
     Pike_error("Instruction too big %d\n",b);
 #endif
   maybe_update_pc();
 
   flags = instrs[b].flags;
 
   addr=instrs[b].address;
   switch(b + F_OFFSET) {
   case F_CATCH:
     {
       /* Special argument for the F_CATCH instruction. */
-      int ptr_offset = 0x20 - 0x03;
-      size_t int_addr;
-
       addr = inter_return_opcode_F_CATCH;
-      int_addr = (size_t)addr;
-      /* We need to compensate for the size of the code to call
-       * inter_return_opcode_F_CATCH.
-       */
-      if (int_addr & ~0x7fffffffLL) {
-        /* Adjust for an additional SHL. */
-        ptr_offset += 0x04;
-        if (int_addr & ~0x3fffffff8LL) {
-          Pike_error("Not supported yet.\n");
-        }
-      }
-      AMD64_LOAD_RIP32(ptr_offset, ARG1_REG);   /* Address for the POINTER. */
+      AMD64_LOAD_RIP32(0, ARG1_REG);   /* Address for the POINTER. */
+      rel_addr = PIKE_PC;
     }
     break;
   case F_UNDEFINED:
     ins_debug_instr_prologue(b, 0, 0);
     amd64_push_int(0, 1);
     return;
   case F_CONST0:
     ins_debug_instr_prologue(b, 0, 0);
     amd64_push_int(0, 0);
     return;
   case F_CONST1:
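
The point of the hunk above: the old code computed a hard-coded byte offset (0x20 - 0x03, widened by four when the callee address did not fit in 32 bits) from the RIP-relative load to the catch pointer, so any change in the size of the emitted call sequence would silently break F_CATCH. The new code instead emits the load with a zero displacement, records the current PIKE_PC in rel_addr, and patches the displacement once the surrounding code has been generated (the upd_pointer() call in the next hunk). Below is a minimal stand-alone sketch of that back-patching idiom; it is not Pike's generator, and every name in it (code, pc, emit8, emit32, patch_rel32) is hypothetical.

  /* Sketch only: emit a RIP-relative LEA with a zero displacement, remember
   * where the displacement field ends, and patch it once the target is known. */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static uint8_t code[256];
  static size_t pc;                          /* plays the role of PIKE_PC */

  static void emit8(uint8_t b)  { code[pc++] = b; }
  static void emit32(int32_t v) { memcpy(code + pc, &v, 4); pc += 4; }

  /* Patch the 32-bit displacement that ends at position 'end' (cf. rel_addr). */
  static void patch_rel32(size_t end, int32_t v) { memcpy(code + end - 4, &v, 4); }

  int main(void)
  {
    emit8(0x48); emit8(0x8d); emit8(0x3d);   /* lea rdi, [rip+disp32] */
    emit32(0);                               /* placeholder displacement */
    size_t rel_addr = pc;                    /* RIP base = end of the instruction */

    emit8(0x90); emit8(0x90); emit8(0x90);   /* code whose length is not known up front */

    /* Same arithmetic as upd_pointer(rel_addr - 4, PIKE_PC - rel_addr). */
    patch_rel32(rel_addr, (int32_t)(pc - rel_addr));

    int32_t disp;
    memcpy(&disp, code + rel_addr - 4, 4);
    printf("displacement patched to %d\n", disp);  /* prints 3 */
    return 0;
  }
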
pike.git/src/code/amd64.c:856:
     AMD64_MOVE_REG_TO_RELADDR(PIKE_MARK_SP_REG, Pike_interpreter_reg,
                               OFFSETOF(Pike_interpreter, mark_stack_pointer));
     dirty_regs &= ~(1 << PIKE_MARK_SP_REG);
   }
   if (flags & I_UPDATE_SP) sp_reg = 0;
   if (flags & I_UPDATE_M_SP) mark_sp_reg = 0;
   if (flags & I_UPDATE_FP) fp_reg = 0;
 
   amd64_call_c_function(addr);
   if (instrs[b].flags & I_RETURN) {
+    INT32 skip;
     if ((b + F_OFFSET) == F_RETURN_IF_TRUE) {
       /* Kludge. We must check if the ret addr is
        * orig_addr + JUMP_EPILOGUE_SIZE. */
       AMD64_LOAD_RIP32(JUMP_EPILOGUE_SIZE - 7, REG_RCX);
     }
     AMD64_CMP_REG_IMM32(REG_RAX, -1);
-    AMD64_JNE(0x0f - 0x03);
+    AMD64_JNE(0);
+    skip = (INT32)PIKE_PC;
     AMD64_POP(REG_RBX);  // Stack padding.
     AMD64_POP(REG_RBX);
     AMD64_POP(REG_R12);
     AMD64_POP(REG_R13);
     AMD64_POP(REG_R14);
     AMD64_POP(REG_R15);
     AMD64_POP(REG_RBP);
     AMD64_RET();
+    Pike_compiler->new_program->program[skip-1] = ((INT32)PIKE_PC) - skip;
     if ((b + F_OFFSET) == F_RETURN_IF_TRUE) {
       /* Kludge. We must check if the ret addr is
        * orig_addr + JUMP_EPILOGUE_SIZE. */
       AMD64_CMP_REG(REG_RAX, REG_RCX);
       AMD64_JE(0x02);
       AMD64_JUMP_REG(REG_RAX);
       return;
     }
   }
   if (flags & I_JUMP) {
     AMD64_JUMP_REG(REG_RAX);
+
+    if (b + F_OFFSET == F_CATCH) {
+      upd_pointer(rel_addr - 4, PIKE_PC - rel_addr);
+    }
   }
 }
 
 int amd64_ins_f_jump(unsigned int op, int backward_jump)
 {
   int flags;
   void *addr;
   int off = op - F_OFFSET;
   int ret = -1;
 #ifdef PIKE_DEBUG
   if(off>255)
     Pike_error("Instruction too big %d\n",off);
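
Two fixes of the same kind in this hunk. For I_RETURN opcodes, the removed AMD64_JNE(0x0f - 0x03) assumed the epilogue being skipped (seven pops plus the ret) was exactly twelve bytes; the new code emits the conditional jump with a zero offset, remembers PIKE_PC in skip, and writes the measured distance into the byte just before skip once the epilogue has been emitted. For F_CATCH, the displacement recorded in rel_addr in the previous hunk is resolved with upd_pointer() right after the indirect jump. A rough sketch of the rel8 back-patch, using hypothetical helpers rather than Pike's macros:

  /* Sketch only: emit "jne +0", remember the position after the offset byte,
   * emit an epilogue of whatever length, then store the real distance. */
  #include <assert.h>
  #include <stdint.h>
  #include <stddef.h>

  static uint8_t code[256];
  static size_t pc;                      /* analogous to PIKE_PC */

  static void emit8(uint8_t b) { code[pc++] = b; }

  int main(void)
  {
    emit8(0x75); emit8(0);               /* jne rel8 with a placeholder offset */
    size_t skip = pc;                    /* like: skip = (INT32)PIKE_PC; */

    /* Epilogue bytes are illustrative only. */
    for (int i = 0; i < 7; i++)
      emit8((uint8_t)(0x58 + i));        /* pop reg (no REX prefixes, for brevity) */
    emit8(0xc3);                         /* ret */

    /* cf. Pike_compiler->new_program->program[skip-1] = PIKE_PC - skip; */
    assert(pc - skip <= 127);            /* a rel8 can only reach 127 bytes forward */
    code[skip - 1] = (uint8_t)(pc - skip);
    return 0;
  }
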
pike.git/src/code/amd64.c:934:
     ret=DO_NOT_WARN( (INT32) PIKE_PC );
     PUSH_INT(0);
     return ret;
   }
 
   addr=instrs[off].address;
   amd64_call_c_function(addr);
   AMD64_TEST_REG(REG_RAX);
 
   if (backward_jump) {
+    INT32 skip;
     add_to_program (0x74);  /* jz rel8 */
-    add_to_program (9 + 1 + 4);
-    amd64_call_c_function (branch_check_threads_etc);  /* 9 bytes */
+    add_to_program (0);     /* Bytes to skip. */
+    skip = (INT32)PIKE_PC;
+    amd64_call_c_function (branch_check_threads_etc);
     add_to_program (0xe9);  /* jmp rel32 */
     ret = DO_NOT_WARN ((INT32) PIKE_PC);
-    PUSH_INT (0);  /* 4 bytes */
+    PUSH_INT (0);
+    /* Adjust the skip for the relative jump. */
+    Pike_compiler->new_program->program[skip-1] = ((INT32)PIKE_PC - skip);
   }
   else {
 #if 0
     if ((cpu_vendor == PIKE_CPU_VENDOR_AMD) &&
         (op == F_LOOP || op == F_FOREACH)) {
       /* FIXME: Slows down Intel PIII/x86 by 7%, speeds up Athlon XP by 22%. :P */
       add_to_program (0x3e);  /* Branch taken hint. */
     }
 #endif
     add_to_program (0x0f);  /* jnz rel32 */
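
Same pattern for the backward-jump case: the jz previously skipped a fixed 9 + 1 + 4 bytes, wiring in the assumption that amd64_call_c_function() always emits exactly nine bytes. Measuring the distance with PIKE_PC and back-patching the offset byte keeps the jump correct even if the call sequence changes size. The sketch below illustrates why a measured skip is more robust; the emitter and its two call encodings are hypothetical, merely assuming (as the removed "/* 9 bytes */" comment hints) that the length of the emitted call can depend on the target.

  /* Sketch only: the skipped-over call may be encoded in more than one way. */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static uint8_t code[256];
  static size_t pc;

  static void emit8(uint8_t b)  { code[pc++] = b; }
  static void emit32(int32_t v) { memcpy(code + pc, &v, 4); pc += 4; }
  static void emit64(int64_t v) { memcpy(code + pc, &v, 8); pc += 8; }

  /* Either a 5-byte "call rel32" or a 12-byte "mov rax, imm64; call rax". */
  static void emit_call(void *target, int rel32_reachable)
  {
    if (rel32_reachable) {
      emit8(0xe8); emit32(0);                 /* call rel32 (displacement elided) */
    } else {
      emit8(0x48); emit8(0xb8);               /* mov rax, imm64 */
      emit64((int64_t)(intptr_t)target);
      emit8(0xff); emit8(0xd0);               /* call rax */
    }
  }

  int main(void)
  {
    emit8(0x74); emit8(0);                    /* jz rel8, placeholder */
    size_t skip = pc;
    emit_call(NULL, 0);                       /* 5 or 12 bytes, depending */
    emit8(0xe9); emit32(0);                   /* jmp rel32 (target patched elsewhere) */
    code[skip - 1] = (uint8_t)(pc - skip);    /* measured skip is always right */
    printf("skipped %u bytes\n", (unsigned)(pc - skip));
    return 0;
  }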