7990392006-04-27Tor Edvardsson /* * Machine code generator for AMD64. */ #include "operators.h" #include "constants.h" #include "object.h" #include "builtin_functions.h"
/* These come from linux include files. */
/* The <ucontext.h> signal-context headers on Linux define REG_RAX etc.
 * as indices into the mcontext register array.  Undefine them and then
 * poison the names: any code in this file that accidentally uses the
 * plain REG_* names (instead of our P_REG_* encodings below) will fail
 * to compile with an undefined MISSING_P_ symbol. */
#ifdef REG_RBX
#undef REG_RAX
#undef REG_RBX
#undef REG_RCX
#undef REG_RDX
#undef REG_RSP
#undef REG_RBP
#undef REG_RSI
#undef REG_RDI
#undef REG_R8
#undef REG_R9
#undef REG_R10
#undef REG_R11
#undef REG_R12
#undef REG_R13
#undef REG_R14
#undef REG_R15
#endif
#define REG_RAX MISSING_P_
#define REG_RBX MISSING_P_
#define REG_RCX MISSING_P_
#define REG_RDX MISSING_P_
#define REG_RSP MISSING_P_
#define REG_RBP MISSING_P_
#define REG_RSI MISSING_P_
#define REG_RDI MISSING_P_
#define REG_R8 MISSING_P_
#define REG_R9 MISSING_P_
#define REG_R10 MISSING_P_
#define REG_R11 MISSING_P_
#define REG_R12 MISSING_P_
#define REG_R13 MISSING_P_
#define REG_R14 MISSING_P_
#define REG_R15 MISSING_P_
/* Register encodings.
 *
 * The values are the hardware register numbers used in ModRM/SIB bytes
 * (bit 3 selects the REX-extended registers R8-R15 / XMM8-XMM15).
 * Note that the XMM names alias the integer names numerically; which
 * register file is meant is determined by the opcode being emitted.
 */
enum amd64_reg {
  /* Classic integer registers. */
  P_REG_RAX = 0,
  P_REG_RCX = 1,
  P_REG_RDX = 2,
  P_REG_RBX = 3,
  P_REG_RSP = 4,
  P_REG_RBP = 5,
  P_REG_RSI = 6,
  P_REG_RDI = 7,

  /* REX-extended integer registers. */
  P_REG_R8  = 8,
  P_REG_R9  = 9,
  P_REG_R10 = 10,
  P_REG_R11 = 11,
  P_REG_R12 = 12,
  P_REG_R13 = 13,
  P_REG_R14 = 14,
  P_REG_R15 = 15,

  /* Marker for "no register cached". */
  P_REG_INVALID = -1,

  /* SSE registers (numerically alias the integer encodings). */
  P_REG_XMM0  = 0,
  P_REG_XMM1  = 1,
  P_REG_XMM2  = 2,
  P_REG_XMM3  = 3,
  P_REG_XMM4  = 4,
  P_REG_XMM5  = 5,
  P_REG_XMM6  = 6,
  P_REG_XMM7  = 7,
  P_REG_XMM8  = 8,
  P_REG_XMM9  = 9,
  P_REG_XMM10 = 10,
  P_REG_XMM11 = 11,
  P_REG_XMM12 = 12,
  P_REG_XMM13 = 13,
  P_REG_XMM14 = 14,
  P_REG_XMM15 = 15,
};
7990392006-04-27Tor Edvardsson 
/* Registers pinned to hold interpreter state while executing generated
 * code.  All four are callee-saved in both the SysV and Win64 ABIs, so
 * they survive calls out to C functions. */
#define PIKE_MARK_SP_REG P_REG_R12
#define PIKE_SP_REG      P_REG_R13
#define PIKE_FP_REG      P_REG_R14
#define Pike_interpreter_reg P_REG_R15

#ifdef __NT__
/* From http://software.intel.com/en-us/articles/introduction-to-x64-assembly/
 *
 * Note: Space for the arguments needs to be allocated on the stack as well.
 */
/* Win64 calling convention: first four integer arguments. */
#define ARG1_REG P_REG_RCX
#define ARG2_REG P_REG_RDX
#define ARG3_REG P_REG_R8
#define ARG4_REG P_REG_R9
#else
/* From SysV ABI for AMD64 draft 0.99.5. */
/* SysV calling convention: first six integer arguments. */
#define ARG1_REG P_REG_RDI
#define ARG2_REG P_REG_RSI
#define ARG3_REG P_REG_RDX
#define ARG4_REG P_REG_RCX
#define ARG5_REG P_REG_R8
#define ARG6_REG P_REG_R9
#endif
7990392006-04-27Tor Edvardsson 
/* Which register (if any) currently caches the corresponding interpreter
 * field; -1 (P_REG_INVALID) when not loaded. */
static enum amd64_reg sp_reg = -1, fp_reg = -1, mark_sp_reg = -1;
/* dirty_regs: bitmask of cached registers that have been modified and
 * must be written back to the Pike_interpreter struct.
 * ret_for_func: helper state for emitting function returns. */
static int dirty_regs = 0, ret_for_func = 0;
ptrdiff_t amd64_prev_stored_pc = -1; /* PROG_PC at the last point Pike_fp->pc was updated. */
/* Per-function flags, reset by amd64_start_function()/amd64_end_function(). */
static int branch_check_threads_update_etc = 0;
static int store_pc = 0;
719c3a2012-06-12Per Hedbor 
/* Maximum number of forward references to a single local label. */
#define MAX_LABEL_USES 6

/* A local (within one instruction sequence) branch target.
 * Forward jumps record the position of their 8-bit displacement byte in
 * offset[]; label() back-patches them when the target is reached. */
struct label {
  int n_label_uses;              /* number of pending forward references */
  ptrdiff_t addr;                /* PIKE_PC of the label, or -1 if unbound */
  ptrdiff_t offset[MAX_LABEL_USES]; /* positions of disp8 bytes to patch */
};

/* Bind a label to the current PIKE_PC and back-patch all recorded
 * forward branches with their (8-bit) displacement. */
static void label( struct label *l )
{
  int i;
  if (l->addr >= 0) Pike_fatal("Label reused.\n");
  for( i=0; i<l->n_label_uses; i++ )
  {
    /* Displacement counts from the byte after the disp8 byte. */
    int dist = PIKE_PC - (l->offset[i] + 1);
    if( dist > 0x7f || dist < -0x80 )
      Pike_fatal("Branch %d too far\n", dist);
    Pike_compiler->new_program->program[l->offset[i]] = dist;
  }
  l->n_label_uses = 0;
  l->addr = PIKE_PC;
}
/* Emit a single immediate byte into the program. */
static void ib( char x ) { add_to_program( x ); }

#if 0
/* Emit a 16-bit immediate, big-endian (unused). */
static void iw( short x )
{
  add_to_program( x>>8 );
  add_to_program( x );
}
#endif

/* Emit a 32-bit immediate, little-endian (x86 byte order). */
static void id( int x )
{
  int byte;
  for( byte = 0; byte < 4; byte++ )
    add_to_program( (x >> (8*byte)) & 0xff );
}

/* x86 opcodes */
#define opcode(X) ib(X)
/* Emit a ModRM byte: mod (2 bits) | reg (3 bits) | r/m (3 bits).
 * The high (REX) bit of r and m is masked off here; it must be supplied
 * via rex() before the opcode. */
static void modrm( int mod, int r, int m )
{
  if (r < 0) Pike_fatal("Using an invalid register(1)!\n");
  if (m < 0) Pike_fatal("Using an invalid register(2)!\n");
  ib( ((mod<<6) | ((r&0x7)<<3) | (m&0x7)) );
}

/* Emit a SIB byte: scale (2 bits) | index (3 bits) | base (3 bits). */
static void sib( int scale, int index, enum amd64_reg base )
{
  ib( (scale<<6) | ((index&0x7)<<3) | (base&0x7) );
}

/* Emit ModRM, plus the SIB byte that is mandatory when the r/m field is
 * RSP (r/m == 4 escapes to SIB addressing). */
static void modrm_sib( int mod, int r, int m )
{
  modrm( mod, r, m );
  if( (m&7) == P_REG_RSP)
    sib(0, P_REG_RSP, P_REG_RSP );
}

/* Emit a REX prefix if any of its bits are needed:
 *   w: 64-bit operand size (REX.W)
 *   r: register in the ModRM reg field (REX.R set when > 7)
 *   x: SIB index extension flag (REX.X)
 *   b: register in the ModRM r/m (or SIB base) field (REX.B set when > 7)
 * Nothing is emitted when all bits would be zero. */
static void rex( int w, enum amd64_reg r, int x, enum amd64_reg b )
{
  unsigned char res = 1<<6; /* bit 7, 5-4 == 0 */
  if( w )       res |= 1<<3;
  if( r > 0x7 ) res |= 1<<2;
  if( x )       res |= 1<<1;
  if( b > 0x7 ) res |= 1<<0;
  if( res != (1<<6) )
    add_to_program( res );
}
/* Emit ModRM(+SIB) and displacement for a [m + offset] memory operand,
 * choosing the shortest encoding: disp32, disp8, or no displacement.
 * RBP (and R13) always need at least disp8, since mod=0 with r/m==RBP
 * means RIP-relative/disp32 instead of [rbp]. */
static void offset_modrm_sib( int offset, int r, int m )
{
  /* OFFSET */
  if( offset < -128 || offset > 127 )
  {
    modrm_sib( 2, r, m );
    id( offset );
  }
  else if( offset || (m&7) == P_REG_RSP || (m&7) == P_REG_RBP )
  {
    modrm_sib( 1, r, m );
    ib( offset );
  }
  else
  {
    modrm_sib( 0, r, m );
  }
}

/* RET. */
static void ret() { opcode(0xc3); }

/* PUSH reg (0x50+r, with REX.B prefix 0x41 for R8-R15). */
static void push(enum amd64_reg reg )
{
  if (reg < 0) Pike_fatal("Pushing invalid register.\n");
  if (reg & 0x08) add_to_program(0x41);
  add_to_program(0x50 + (reg & 0x07));
}

/* POP reg (0x58+r, with REX.B prefix 0x41 for R8-R15). */
static void pop(enum amd64_reg reg )
{
  if (reg < 0) Pike_fatal("Popping invalid register.\n");
  if (reg & 0x08) add_to_program(0x41);
  add_to_program(0x58 + (reg & 0x07));
}

/* to_reg = from_reg (64-bit MOV r/m64, r64). */
static void mov_reg_reg(enum amd64_reg from_reg, enum amd64_reg to_reg )
{
  rex( 1, from_reg, 0, to_reg );
  opcode( 0x89 );
  modrm( 3, from_reg, to_reg );
}
/* Append a 32-bit integer to the program, byte by byte. */
#define PUSH_INT(X) ins_int((INT32)(X), (void (*)(char))add_to_program)

/* MOV [from_reg + offset] -> to_reg, without the REX prefix; the caller
 * supplies REX to select 32- or 64-bit operand size. */
static void low_mov_mem_reg(enum amd64_reg from_reg, ptrdiff_t offset,
                            enum amd64_reg to_reg)
{
  opcode( 0x8b );
  offset_modrm_sib(offset, to_reg, from_reg );
}

/* to_reg = *(INT64 *)(from_reg + offset). */
static void mov_mem_reg( enum amd64_reg from_reg, ptrdiff_t offset,
                         enum amd64_reg to_reg )
{
  rex( 1, to_reg, 0, from_reg );
  low_mov_mem_reg( from_reg, offset, to_reg );
}
/* reg2 ^= reg1 (64-bit XOR). */
static void xor_reg_reg( enum amd64_reg reg1, enum amd64_reg reg2 )
{
  rex(1,reg1,0,reg2);
  opcode( 0x31 );
  modrm(3,reg1,reg2);
}

/* reg &= imm32 (64-bit AND; the immediate is sign-extended).
 * Uses the short RAX form or the imm8 form when possible. */
static void and_reg_imm( enum amd64_reg reg, int imm32 )
{
  rex( 1, 0, 0, reg );

  if( imm32 < -0x80 || imm32 > 0x7f )
  {
    if( reg == P_REG_RAX )
    {
      opcode( 0x25 ); /* AND rax,imm32 */
      id( imm32 );
    }
    else
    {
      opcode( 0x81 ); /* AND REG,imm32 */
      modrm( 3,4, reg);
      id( imm32 );
    }
  }
  else
  {
    add_to_program(0x83); /* AND REG,imm8 */
    modrm( 3, 4, reg );
    ib( imm32 );
  }
}
378ad02012-06-25Per Hedbor static void and_reg32_imm( enum amd64_reg reg, int imm32 ) { rex( 0, 0, 0, reg ); if( imm32 < -0x80 || imm32 > 0x7f ) {
a137542013-10-08Per Hedbor  if( reg == P_REG_RAX )
378ad02012-06-25Per Hedbor  { opcode( 0x25 ); /* AND rax,imm32 */ id( imm32 ); } else { opcode( 0x81 ); /* AND REG,imm32 */ modrm( 3,4, reg); id( imm32 ); } } else { add_to_program(0x83); /* AND REG,imm8 */ modrm( 3, 4, reg ); ib( imm32 ); } }
99c1e92012-06-20Per Hedbor static void mov_mem32_reg( enum amd64_reg from_reg, ptrdiff_t offset, enum amd64_reg to_reg ) { rex( 0, to_reg, 0, from_reg ); low_mov_mem_reg( from_reg, offset, to_reg ); }
719c3a2012-06-12Per Hedbor static void mov_mem16_reg( enum amd64_reg from_reg, ptrdiff_t offset, enum amd64_reg to_reg ) {
1af0932012-06-22Per Hedbor #if 0
719c3a2012-06-12Per Hedbor  mov_mem32_reg( from_reg, offset, to_reg );
99c1e92012-06-20Per Hedbor  and_reg_imm(to_reg, 0xffff);
1af0932012-06-22Per Hedbor #else rex( 1,to_reg,0,from_reg ); /* movzx r/m16 -> r32. This automatically zero-extends the upper 32-bit */ opcode( 0xf ); opcode( 0xb7 ); offset_modrm_sib(offset, to_reg, from_reg ); #endif } static void mov_mem8_reg( enum amd64_reg from_reg, ptrdiff_t offset, enum amd64_reg to_reg ) { #if 0 mov_mem32_reg( from_reg, offset, to_reg ); and_reg_imm(to_reg, 0xff); #else rex( 0,to_reg,0,from_reg ); /* movzx r/m8 -> r32. This automatically zero-extends the upper 32-bit */ opcode( 0xf ); opcode( 0xb6 ); offset_modrm_sib(offset, to_reg, from_reg ); #endif
719c3a2012-06-12Per Hedbor } static void add_reg_imm( enum amd64_reg src, int imm32); static void shl_reg_imm( enum amd64_reg from_reg, int shift ) { rex( 1, from_reg, 0, 0 ); if( shift == 1 ) {
b233f42012-06-17Henrik Grubbström (Grubba)  opcode( 0xd1 ); /* SAL */ modrm( 3, 4, from_reg );
719c3a2012-06-12Per Hedbor  } else { opcode( 0xc1 );
b233f42012-06-17Henrik Grubbström (Grubba)  modrm( 3, 4, from_reg );
719c3a2012-06-12Per Hedbor  ib( shift ); } }
#if 0
/* reg <<= cl (64-bit).  Shift count must already be in RCX. */
static void shl_reg_reg( enum amd64_reg reg, enum amd64_reg sreg)
{
  if( sreg != P_REG_RCX )
    Pike_fatal("Not supported\n");
  rex( 1, 0, 0, reg );
  opcode( 0xd3 );
  modrm( 3, 4, reg );
}
#endif

/* reg <<= cl, 32-bit operand size.  Shift count must be in RCX. */
static void shl_reg32_reg( enum amd64_reg reg, enum amd64_reg sreg)
{
  if( sreg != P_REG_RCX )
    Pike_fatal("Not supported\n");
  rex( 0, 0, 0, reg );
  opcode( 0xd3 );
  modrm( 3, 4, reg );
}

#if 0
/* reg <<= *(unsigned char *)(mem + offset).  Clobbers RCX, so reg must
 * not be RCX. */
static void shl_reg_mem( enum amd64_reg reg, enum amd64_reg mem, int offset)
{
  if( reg == P_REG_RCX )
    Pike_fatal("Not supported\n");
  mov_mem8_reg( mem, offset, P_REG_RCX );
  shl_reg_reg( reg, P_REG_RCX );
}
#endif
1af0932012-06-22Per Hedbor static void shr_reg_imm( enum amd64_reg from_reg, int shift ) { rex( 1, from_reg, 0, 0 ); if( shift == 1 ) { opcode( 0xd1 ); /* SAR */ modrm( 3, 7, from_reg ); } else { opcode( 0xc1 ); modrm( 3, 7, from_reg ); ib( shift ); } }
/* reg = 0, via the idiomatic xor reg,reg. */
static void clear_reg( enum amd64_reg reg ) { xor_reg_reg( reg, reg ); }

#if 0
/* reg = -reg (64-bit NEG, /3). */
static void neg_reg( enum amd64_reg reg )
{
  rex(1,0,0,reg);
  opcode(0xf7);
  modrm(3,3,reg);
}
#endif
/* reg = imm, choosing the shortest encoding:
 *  - full 10-byte movabs for values outside the signed 32-bit range,
 *  - 32-bit mov (zero-extends into the upper half) for positive values,
 *  - sign-extending 64-bit mov for negative values. */
static void mov_imm_reg( long imm, enum amd64_reg reg )
{
  if( (imm > 0x7fffffffLL) || (imm < -0x80000000LL) )
  {
    rex(1,0,0,reg);
    opcode(0xb8 | (reg&0x7)); /* mov imm64 -> reg64 */
    id( (imm & 0xffffffffLL) );
    id( ((imm >> 32)&0xffffffffLL) );
  }
  else if( imm > 0 )
  {
    rex(0,0,0,reg);
    opcode( 0xc7 ); /* mov imm32 -> reg/m 32; the 32-bit write
                     * zero-extends into the full 64-bit register. */
    modrm( 3,0,reg );
    id( (int)imm );
  }
  else
  {
    rex(1,0,0,reg);
    opcode( 0xc7 ); /* mov imm32 -> reg/m 64, SE*/
    modrm( 3,0,reg );
    id( (int)imm );
  }
}

/* static void mov_ptr_reg( void *x, enum amd64_reg reg ) */
/* { */
/*   mov_imm_reg( (long)x, reg ); */
/* } */
/* to_reg(xmm) = *(m128 *)(from_reg + offset), aligned (MOVDQA).
 *
 * Fix: the mandatory 0x66 prefix is a legacy prefix and must be emitted
 * BEFORE the REX prefix (a REX byte followed by more prefixes is
 * ignored by the CPU).  Previously rex() was emitted first, which
 * produced a broken encoding whenever a REX byte was actually needed
 * (xmm8-15); compare mov_reg_mem16 below, which orders the prefixes
 * correctly.  Byte output is unchanged when no REX byte is emitted. */
static void mov_mem128_reg( enum amd64_reg from_reg, int offset, enum amd64_reg to_reg )
{
  if( from_reg > 7 )
    Pike_fatal("Not supported\n");
  opcode( 0x66 );            /* mandatory prefix, before REX */
  rex(0,to_reg,0,from_reg);
  opcode( 0x0f );
  opcode( 0x6f ); /* MOVDQA xmm,m128 */
  offset_modrm_sib( offset, to_reg, from_reg );
}

/* *(m128 *)(to_reg + offset) = from_reg(xmm), aligned (MOVDQA). */
static void mov_reg_mem128( enum amd64_reg from_reg, enum amd64_reg to_reg, int offset )
{
  if( from_reg > 7 )
    Pike_fatal("Not supported\n");
  opcode( 0x66 );            /* mandatory prefix, before REX */
  rex(0,from_reg,0,to_reg);
  opcode( 0x0f );
  opcode( 0x7f ); /* MOVDQA m128,xmm */
  offset_modrm_sib( offset, from_reg, to_reg );
}
/* MOV from_reg -> [to_reg + offset], without the REX prefix; the caller
 * supplies REX to select the operand size. */
static void low_mov_reg_mem(enum amd64_reg from_reg, enum amd64_reg to_reg, ptrdiff_t offset )
{
  opcode( 0x89 );
  offset_modrm_sib( offset, from_reg, to_reg );
}

/* *(INT64 *)(to_reg + offset) = from_reg. */
static void mov_reg_mem( enum amd64_reg from_reg, enum amd64_reg to_reg, ptrdiff_t offset )
{
  rex(1, from_reg, 0, to_reg );
  low_mov_reg_mem( from_reg, to_reg, offset );
}

/* *(INT32 *)(to_reg + offset) = from_reg (low 32 bits). */
static void mov_reg_mem32( enum amd64_reg from_reg, enum amd64_reg to_reg, ptrdiff_t offset )
{
  rex(0, from_reg, 0, to_reg );
  low_mov_reg_mem( from_reg, to_reg, offset );
}

#if 0
/* *(INT16 *)(to_reg + offset) = from_reg (low 16 bits).
 * Note the 0x66 operand-size prefix before the REX prefix. */
static void mov_reg_mem16( enum amd64_reg from_reg, enum amd64_reg to_reg, ptrdiff_t offset )
{
  opcode( 0x66 );
  rex(0, from_reg, 0, to_reg );
  low_mov_reg_mem( from_reg, to_reg, offset );
}
#endif
/* *(INT64 *)(to_reg + offset) = imm.
 * Immediates outside the signed 32-bit range are routed through RAX,
 * so to_reg must not be RAX in that case. */
static void mov_imm_mem( long imm, enum amd64_reg to_reg, ptrdiff_t offset )
{
  if( imm >= -0x80000000LL && imm <= 0x7fffffffLL )
  {
    rex( 1, 0, 0, to_reg );
    opcode( 0xc7 ); /* mov imm32 -> r/m64 (sign extend)*/
    offset_modrm_sib( offset, 0, to_reg );
    id( imm );
  }
  else
  {
    if( to_reg == P_REG_RAX )
      Pike_fatal( "Clobbered TMP P_REG_RAX reg\n");
    mov_imm_reg( imm, P_REG_RAX );
    mov_reg_mem( P_REG_RAX, to_reg, offset );
  }
}

/* *(INT32 *)(to_reg + offset) = imm. */
static void mov_imm_mem32( int imm, enum amd64_reg to_reg, ptrdiff_t offset )
{
  rex( 0, 0, 0, to_reg );
  opcode( 0xc7 ); /* mov imm32 -> r/m32 (sign extend)*/
  /* NOTE(review): the old warning "does not work for rg&7 == 4 or 5"
   * looks stale — offset_modrm_sib() emits the SIB/disp8 forms required
   * for RSP/RBP bases.  Verify before relying on those bases, though. */
  offset_modrm_sib( offset, 0, to_reg );
  id( imm );
}
static void sub_reg_imm( enum amd64_reg reg, int imm32 );

/* reg += imm32 (64-bit ADD; negative immediates are emitted as SUB to
 * keep disassembly readable).  Emits nothing for imm32 == 0. */
static void add_reg_imm( enum amd64_reg reg, int imm32 )
{
  if( !imm32 ) return;

  if( imm32 < 0 )
  {
    /* This makes the disassembly easier to read. */
    sub_reg_imm( reg, -imm32 );
    return;
  }

  rex( 1, 0, 0, reg );
  if( imm32 < -0x80 || imm32 > 0x7f )
  {
    if( reg == P_REG_RAX )
    {
      opcode( 0x05 ); /* ADD rax,imm32 */
      id( imm32 );
    }
    else
    {
      opcode( 0x81 ); /* ADD REG,imm32 */
      modrm( 3, 0, reg);
      id( imm32 );
    }
  }
  else
  {
    add_to_program(0x83); /* ADD REG,imm8 */
    modrm( 3, 0, reg );
    ib( imm32 );
  }
}
/* *(mem + offset) += imm32, with operand size selected by w (0: 32-bit,
 * 1: 64-bit).  Uses INC/DEC for +/-1, imm8 or imm32 ADD otherwise.
 * For imm32 == -1 the value is negated and DEC (/1) is emitted. */
static void low_add_mem_imm(int w, enum amd64_reg reg, int offset, int imm32 )
{
  int r2 = imm32 == -1 ? 1 : 0;  /* ModRM reg field: /0 = ADD|INC, /1 = DEC */
  int large = 0;
  if( !imm32 ) return;
  rex( w, 0, 0, reg );

  if( r2 ) imm32 = -imm32;

  if( imm32 == 1  )
    opcode( 0xff ); /* INCL(DECL) r/m32 */
  else if( imm32 >= -128 && imm32 < 128 )
    opcode( 0x83 ); /* ADD imm8,r/m32 */
  else
  {
    opcode( 0x81 ); /* ADD imm32,r/m32 */
    large = 1;
  }
  offset_modrm_sib( offset, r2, reg );
  /* INC/DEC take no immediate. */
  if( imm32 != 1 )
  {
    if( large )
      id( imm32 );
    else
      ib( imm32 );
  }
}

/* *(INT32 *)(reg + offset) += imm32. */
static void add_mem32_imm( enum amd64_reg reg, int offset, int imm32 )
{
  low_add_mem_imm( 0, reg, offset, imm32 );
}

/* *(INT64 *)(reg + offset) += imm32. */
static void add_mem_imm( enum amd64_reg reg, int offset, int imm32 )
{
  low_add_mem_imm( 1, reg, offset, imm32 );
}
#ifndef USE_VALGRIND
/* *(char *)(reg + offset) += imm32 (8-bit memory add).
 * Only +/-1 (INC/DEC) and imm8 ADD are supported. */
static void add_mem8_imm( enum amd64_reg reg, int offset, int imm32 )
{
  int r2 = imm32 == -1 ? 1 : 0;  /* /0 = ADD|INC, /1 = DEC */
  if( !imm32 ) return;
  rex( 0, 0, 0, reg );

  if( imm32 == 1 || imm32 == -1 )
    opcode( 0xfe ); /* INCL r/m8 */
  else if( imm32 >= -128 && imm32 < 128 )
    opcode( 0x80 ); /* ADD imm8,r/m32 */
  else
    Pike_fatal("Not sensible");

  offset_modrm_sib( offset, r2, reg );
  /* INC/DEC take no immediate byte. */
  if( imm32 != 1 && !r2 )
  {
    ib( imm32 );
  }
}
#endif
/* reg -= imm32 (64-bit SUB; negative immediates are emitted as ADD to
 * keep disassembly readable).  Emits nothing for imm32 == 0. */
static void sub_reg_imm( enum amd64_reg reg, int imm32 )
{
  if( !imm32 ) return;
  if( imm32 < 0 )
    /* This makes the disassembly easier to read. */
    return add_reg_imm( reg, -imm32 );

  rex( 1, 0, 0, reg );
  if( imm32 < -0x80 || imm32 > 0x7f )
  {
    if( reg == P_REG_RAX )
    {
      opcode( 0x2d ); /* SUB rax,imm32 */
      id( imm32 );
    }
    else
    {
      opcode( 0x81 ); /* SUB REG,imm32 */
      modrm( 3, 5, reg);
      id( imm32 );
    }
  }
  else
  {
    opcode(0x83); /* SUB REG,imm8 */
    modrm( 3, 5, reg );
    ib( imm32 );
  }
}

/* TEST reg2, reg1 (64-bit AND that only sets flags). */
static void test_reg_reg( enum amd64_reg reg1, enum amd64_reg reg2 )
{
  rex(1,reg1,0,reg2);
  opcode(0x85);
  modrm(3, reg1, reg2 );
}

/* Set flags from reg1 (TEST reg1, reg1; ZF set iff reg1 == 0). */
static void test_reg( enum amd64_reg reg1 )
{
  test_reg_reg( reg1, reg1 );
}

/* CMP reg, imm32 (64-bit compare; sets flags only). */
static void cmp_reg_imm( enum amd64_reg reg, int imm32 )
{
  rex(1, 0, 0, reg);
  if( imm32 > 0x7f || imm32 < -0x80 )
  {
    if( reg == P_REG_RAX )
    {
      opcode( 0x3d ); /* CMP rax,imm32 */
      id( imm32 );
    }
    else
    {
      opcode( 0x81 ); /* CMP r/m64,imm32 */
      modrm(3,7,reg);
      id( imm32 );
    }
  }
  else
  {
    opcode( 0x83 ); /* CMP r/m64,imm8 */
    modrm( 3,7,reg);
    ib( imm32 );
  }
}
4ceb792012-06-22Per Hedbor static void cmp_reg32_imm( enum amd64_reg reg, int imm32 ) { rex(0, 0, 0, reg); if( imm32 > 0x7f || imm32 < -0x80 ) {
a137542013-10-08Per Hedbor  if( reg == P_REG_RAX )
4ceb792012-06-22Per Hedbor  { opcode( 0x3d ); id( imm32 ); } else { opcode( 0x81 ); modrm(3,7,reg); id( imm32 ); } } else { opcode( 0x83 ); modrm( 3,7,reg); ib( imm32 ); } }
/* CMP reg1, reg2 (64-bit register compare; sets flags only). */
static void cmp_reg_reg( enum amd64_reg reg1, enum amd64_reg reg2 )
{
  rex(1, reg2, 0, reg1);
  opcode( 0x39 );
  modrm( 3, reg2, reg1 );
}
/* Emit a 5-byte JMP rel32 to program offset addr.
 * Returns the PIKE_PC of the displacement so it can be re-patched. */
static int jmp_rel_imm32( int addr )
{
  int rel = addr - (PIKE_PC + 5); // counts from the next instruction
  int res;
  opcode( 0xe9 );
  res = PIKE_PC;
  id( rel );
  return res;
}

/* Emit a JMP to program offset addr, using the short 2-byte rel8 form
 * when the target is close enough. */
static void jmp_rel_imm( int addr )
{
  int rel = addr - (PIKE_PC + 2); // counts from the next instruction
  if(rel >= -0x80 && rel <= 0x7f )
  {
      opcode( 0xeb );
      ib( rel );
      return;
  }
  jmp_rel_imm32( addr );
}
/* Emit a 5-byte CALL rel32 to program offset addr.
 * The callee may move the Pike stack, so the cached sp_reg is
 * invalidated and will be reloaded on next use. */
static void call_rel_imm32( int addr )
{
  int rel = addr - (PIKE_PC + 5); // counts from the next instruction
  opcode( 0xe8 );
  id( rel );
  sp_reg = -1;
  return;
}
719c3a2012-06-12Per Hedbor static void jmp_reg( enum amd64_reg reg ) { rex(0,reg,0,0); opcode( 0xff ); modrm( 3, 4, reg ); } static void call_reg( enum amd64_reg reg ) { rex(0,reg,0,0); opcode( 0xff ); modrm( 3, 2, reg ); } static void call_imm( void *ptr ) { size_t addr = (size_t)ptr; if( (addr & ~0x7fffffffLL) && !(addr & ~0x3fffffff8LL) ) {
a137542013-10-08Per Hedbor  mov_imm_reg( addr>>3, P_REG_RAX); shl_reg_imm( P_REG_RAX, 3 );
719c3a2012-06-12Per Hedbor  } else {
a137542013-10-08Per Hedbor  mov_imm_reg(addr, P_REG_RAX );
719c3a2012-06-12Per Hedbor  }
a137542013-10-08Per Hedbor  call_reg( P_REG_RAX );
719c3a2012-06-12Per Hedbor }
/* reg += reg2 */
static void add_reg_reg( enum amd64_reg reg, enum amd64_reg reg2 )
{
  rex(1,reg2,0,reg);
  opcode( 0x1 );  /* ADD r/m64, r64 */
  modrm( 3, reg2, reg );
}

/* reg -= reg2 */
static void sub_reg_reg( enum amd64_reg reg, enum amd64_reg reg2 )
{
  rex(1,reg2,0,reg);
  opcode( 0x29 ); /* SUB r/m64, r64 */
  modrm( 3, reg2, reg );
}

/* dst += *(src+off) */
static void add_reg_mem( enum amd64_reg dst, enum amd64_reg src, long off )
{
  rex(1,dst,0,src);
  opcode( 0x3 );  /* ADD r64, r/m64 */
  offset_modrm_sib( off, dst, src );
}

/* dst -= *(src+off) */
static void sub_reg_mem( enum amd64_reg dst, enum amd64_reg src, long off )
{
  rex(1,dst,0,src);
  opcode( 0x2b ); /* SUB r64, r/m64 */
  offset_modrm_sib( off, dst, src );
}
/* dst = src + imm32 (LEA, does not set flags!) */
static void add_reg_imm_reg( enum amd64_reg src, long imm32, enum amd64_reg dst )
{
  if( imm32 > 0x7fffffffLL ||
      imm32 <-0x80000000LL)
    Pike_fatal("LEA [reg+imm] > 32bit Not supported\n");

  if( src == dst )
  {
    /* Same register: plain ADD (or nothing). */
    if( !imm32 ) return;
    add_reg_imm( src, imm32 );
  }
  else
  {
    if( !imm32 )
    {
      mov_reg_reg( src, dst );
      return;
    }
    rex(1,dst,0,src);
    opcode( 0x8d ); /* LEA r64,m */
    offset_modrm_sib( imm32, dst, src );
  }
}

/* load code adress + imm to reg, always 32bit offset */
static void mov_rip_imm_reg( int imm, enum amd64_reg reg )
{
  imm -= 7; /* The size of this instruction. */

  rex( 1, reg, 0, 0 );
  opcode( 0x8d ); /* LEA */
  /* mod=0, r/m=5 is the RIP-relative disp32 addressing mode. */
  modrm( 0, reg, 5 );
  id( imm );
}

/* *(INT64 *)(reg + offset) += imm32, using INC/DEC for +/-1.
 * (64-bit sibling of low_add_mem_imm above.) */
static void add_imm_mem( int imm32, enum amd64_reg reg, int offset )
{
  int r2 = (imm32 == -1) ? 1 : 0;  /* /0 = ADD|INC, /1 = DEC */
  int large = 0;

  /* OPCODE */
  rex( 1, 0, 0, reg );
  if( imm32 == 1 || imm32 == -1 )
    opcode( 0xff ); /* INCL(decl) r/m32 */
  else if( -128 <= imm32 && 128 > imm32 )
    opcode( 0x83 ); /* ADD imm8,r/m32 */
  else
  {
    opcode( 0x81 ); /* ADD imm32,r/m32 */
    large = 1;
  }
  offset_modrm_sib( offset, r2, reg );

  /* VALUE */
  if( imm32 != 1 && !r2 )
  {
    if( large )
      id( imm32 );
    else
      ib( imm32 );
  }
}

/* Emit a 2-byte short conditional/unconditional jump (opcode op) to the
 * given label.  Backward jumps are resolved immediately; forward jumps
 * record the disp8 position in the label for later back-patching. */
static void jump_rel8( struct label *res, unsigned char op )
{
  opcode( op );

  if (res->addr >= 0) {
    ib(res->addr - (PIKE_PC+1));
    return;
  }

  if( res->n_label_uses >= MAX_LABEL_USES )
    Pike_fatal( "Label used too many times\n" );
  res->offset[res->n_label_uses] = PIKE_PC;
  res->n_label_uses++;

  ib(0);  /* placeholder, patched by label() */
}

/* JNZ rel32; returns the PIKE_PC of the displacement for re-patching. */
static int jnz_imm_rel32( int rel )
{
  int res;
  opcode( 0xf );
  opcode( 0x85 );
  res = PIKE_PC;
  id( rel );
  return res;
}

/* JZ rel32; returns the PIKE_PC of the displacement for re-patching. */
static int jz_imm_rel32( int rel )
{
  int res;
  opcode( 0xf );
  opcode( 0x84 );
  res = PIKE_PC;
  id( rel );
  return res;
}

#define jne(X) jnz(X)
#define je(X) jz(X)

/* Short-form (rel8) jumps to local labels. */
static void jmp( struct label *l ) { return jump_rel8( l, 0xeb ); }
static void jo( struct label *l ) { return jump_rel8( l, 0x70 ); }
/* Short-form (rel8) conditional jumps to local labels. */
static void jno( struct label *l ) { return jump_rel8( l, 0x71 ); }
#if 0
static void jc( struct label *l ) { return jump_rel8( l, 0x72 ); }
static void jnc( struct label *l ) { return jump_rel8( l, 0x73 ); }
#endif
static void jz( struct label *l ) { return jump_rel8( l, 0x74 ); }
static void jnz( struct label *l ) { return jump_rel8( l, 0x75 ); }
static void js( struct label *l ) { return jump_rel8( l, 0x78 ); }
static void jl( struct label *l ) { return jump_rel8( l, 0x7c ); }
static void jge( struct label *l ) { return jump_rel8( l, 0x7d ); }
static void jle( struct label *l ) { return jump_rel8( l, 0x7e ); }
#if 0
static void jg( struct label *l ) { return jump_rel8( l, 0x7f ); }
#endif
719c3a2012-06-12Per Hedbor 
/* Declare and initialize the five local labels (A-E) used by the
 * instruction-emitting functions below.  Must appear at the top of any
 * function that uses LABEL_A..LABEL_E / the j* label helpers. */
#define LABELS() struct label label_A, label_B, label_C, label_D, label_E;label_A.addr = -1;label_A.n_label_uses = 0;label_B.addr = -1;label_B.n_label_uses = 0;label_C.addr = -1;label_C.n_label_uses = 0;label_D.addr = -1;label_D.n_label_uses = 0;label_E.addr=-1;label_E.n_label_uses=0;
/* Bind the corresponding label at the current PIKE_PC. */
#define LABEL_A label(&label_A)
#define LABEL_B label(&label_B)
#define LABEL_C label(&label_C)
#define LABEL_D label(&label_D)
#define LABEL_E label(&label_E)
d1fa802011-05-09Henrik Grubbström (Grubba) 
static void amd64_ins_branch_check_threads_etc(int code_only);
/* PIKE_PC where the current function's code starts. */
static int func_start = 0;

/* Called at the start of a function. */
void amd64_start_function(int store_lines )
{
  store_pc = store_lines;
  branch_check_threads_update_etc = 0;
  /*
   * store_lines is true for any non-constant evaluation function
   * In reality we should only do this if we are going to be using
   * check_threads_etc (ie, if we have a loop).
   *
   * It does not really waste all that much space in the code, though.
   *
   * We could really do this one time per program instead of once per
   * function.
   *
   * But it is very hard to know when a new program starts, and
   * PIKE_PC is not reliable as a marker since constant evaluations
   * are also done using this code. So for now, waste ~20 bytes per
   * function in the program code.
   */
  if( store_lines )
    amd64_ins_branch_check_threads_etc(1);
  func_start = PIKE_PC;
}

/* Called when the current function is done */
void amd64_end_function(int UNUSED(no_pc))
{
  branch_check_threads_update_etc = 0;
}
/* Machine code entry prologue.
 *
 * On entry:
 *   RDI: Pike_interpreter	(ARG1_REG)
 *
 * During interpreting:
 *   R15: Pike_interpreter
 */
void amd64_ins_entry(void)
{
  /* Push all registers that the ABI requires to be preserved. */
  push(P_REG_RBP);
  mov_reg_reg(P_REG_RSP, P_REG_RBP);
  push(P_REG_R15);
  push(P_REG_R14);
  push(P_REG_R13);
  push(P_REG_R12);
  push(P_REG_RBX);
  /* 6 pushes after the frame pointer leave RSP 8 bytes off 16-byte
   * alignment; compensate. */
  sub_reg_imm(P_REG_RSP, 8);	/* Align on 16 bytes. */

  /* Keep the interpreter state pointer in R15 while interpreting. */
  mov_reg_reg(ARG1_REG, Pike_interpreter_reg);

  amd64_flush_code_generator_state();
}
54a26b2011-05-11Henrik Grubbström (Grubba) void amd64_flush_code_generator_state(void) {
6810702012-06-21Henrik Grubbström (Grubba)  sp_reg = -1; fp_reg = -1;
638a2c2012-07-16Per Hedbor  ret_for_func = 0;
6810702012-06-21Henrik Grubbström (Grubba)  mark_sp_reg = -1;
54a26b2011-05-11Henrik Grubbström (Grubba)  dirty_regs = 0; amd64_prev_stored_pc = -1; }
/* Write back any modified cached registers (stack pointer and mark
 * stack pointer) to the Pike_interpreter struct and clear their dirty
 * bits.  Must be called before code that reads those fields from
 * memory (e.g. calls into C). */
static void flush_dirty_regs(void)
{
  /* NB: PIKE_FP_REG is currently never dirty. */
  if (dirty_regs & (1 << PIKE_SP_REG)) {
    mov_reg_mem(PIKE_SP_REG, Pike_interpreter_reg,
                OFFSETOF(Pike_interpreter_struct, stack_pointer));
    dirty_regs &= ~(1 << PIKE_SP_REG);
  }
  if (dirty_regs & (1 << PIKE_MARK_SP_REG)) {
    mov_reg_mem(PIKE_MARK_SP_REG, Pike_interpreter_reg,
                OFFSETOF(Pike_interpreter_struct, mark_stack_pointer));
    dirty_regs &= ~(1 << PIKE_MARK_SP_REG);
  }
}
7990392006-04-27Tor Edvardsson 
/* NB: We load Pike_fp et al into registers that
 *     are persistent across function calls.
 */
/* Ensure Pike_fp is cached in PIKE_FP_REG (loads it if not). */
void amd64_load_fp_reg(void)
{
  if (fp_reg < 0) {
    mov_mem_reg(Pike_interpreter_reg,
                OFFSETOF(Pike_interpreter_struct, frame_pointer),
                PIKE_FP_REG);
    fp_reg = PIKE_FP_REG;
  }
}

/* Ensure Pike_sp is cached in PIKE_SP_REG (loads it if not). */
void amd64_load_sp_reg(void)
{
  if (sp_reg < 0) {
    mov_mem_reg(Pike_interpreter_reg,
                OFFSETOF(Pike_interpreter_struct, stack_pointer),
                PIKE_SP_REG);
    sp_reg = PIKE_SP_REG;
  }
}

/* Ensure Pike_mark_sp is cached in PIKE_MARK_SP_REG (loads it if not). */
void amd64_load_mark_sp_reg(void)
{
  if (mark_sp_reg < 0) {
    mov_mem_reg(Pike_interpreter_reg,
                OFFSETOF(Pike_interpreter_struct, mark_stack_pointer),
                PIKE_MARK_SP_REG);
    mark_sp_reg = PIKE_MARK_SP_REG;
  }
}
/* Load an immediate into the first C-call argument register. */
static void update_arg1(INT32 value)
{
  mov_imm_reg(value, ARG1_REG);
  /* FIXME: Alloc stack space on NT. */
}

/* Load an immediate into the second C-call argument register. */
static void update_arg2(INT32 value)
{
  mov_imm_reg(value, ARG2_REG);
  /* FIXME: Alloc stack space on NT. */
}
/* Pike_sp += num (in svalues); marks sp dirty and writes it back. */
static void amd64_add_sp( int num )
{
  amd64_load_sp_reg();
  add_reg_imm( sp_reg, sizeof(struct svalue)*num);
  dirty_regs |= 1 << PIKE_SP_REG;
  flush_dirty_regs(); /* FIXME: Need to change LABEL handling to remove this */
}

/* Pike_mark_sp += num (in svalue pointers); marks it dirty and writes
 * it back. */
static void amd64_add_mark_sp( int num )
{
  amd64_load_mark_sp_reg();
  add_reg_imm( mark_sp_reg, sizeof(struct svalue*)*num);
  dirty_regs |= 1 << PIKE_MARK_SP_REG;
  flush_dirty_regs(); /* FIXME: Need to change LABEL handling to remove this */
}
/* Note: Uses RAX and RCX internally. reg MUST not be P_REG_RAX. */
/* Copy the svalue at *reg to Pike_sp[spoff], adding a reference when
 * the type is a refcounted one (type & MIN_REF_TYPE). */
static void amd64_push_svaluep_to(int reg, int spoff)
{
  LABELS();
  if( reg == P_REG_RAX )
    Pike_fatal("Using RAX in push_svaluep not supported\n" );
  amd64_load_sp_reg();
  /* Copy both words of the svalue (type+subtype and the union). */
  mov_mem_reg(reg, OFFSETOF(svalue, tu.t.type), P_REG_RAX);
  mov_mem_reg(reg, OFFSETOF(svalue, u.refs), P_REG_RCX);
  mov_reg_mem(P_REG_RAX, sp_reg, spoff*sizeof(struct svalue)+OFFSETOF(svalue, tu.t.type));
  mov_reg_mem(P_REG_RCX, sp_reg, spoff*sizeof(struct svalue)+OFFSETOF(svalue, u.refs));
  /* Skip the refcount increment for non-refcounted types. */
  and_reg32_imm(P_REG_RAX, MIN_REF_TYPE);
  jz(&label_A);
  add_imm_mem( 1, P_REG_RCX, OFFSETOF(pike_string, refs));
 LABEL_A;
}

/* Push the svalue at *reg onto the Pike stack and advance Pike_sp. */
static void amd64_push_svaluep(int reg)
{
  amd64_push_svaluep_to( reg, 0 );
  amd64_add_sp( 1 );
}
/* Emit code to push the integer constant 'value' (with the given
 * subtype, e.g. NUMBER_UNDEFINED) onto the Pike stack. */
static void amd64_push_int(INT64 value, int subtype)
{
  amd64_load_sp_reg();
  /* Type word is (subtype << 16) | PIKE_T_INT. */
  mov_imm_mem((subtype<<16) + PIKE_T_INT, sp_reg, OFFSETOF(svalue, tu.t.type));
  mov_imm_mem(value, sp_reg, OFFSETOF(svalue, u.integer));
  amd64_add_sp( 1 );
}

/* Emit code to push the integer currently held in 'reg' (subtype 0)
 * onto the Pike stack. */
static void amd64_push_int_reg(enum amd64_reg reg )
{
  amd64_load_sp_reg();
  mov_imm_mem( PIKE_T_INT, sp_reg, OFFSETOF(svalue, tu.t.type));
  mov_reg_mem( reg, sp_reg, OFFSETOF(svalue, u.integer));
  amd64_add_sp( 1 );
}
d1fa802011-05-09Henrik Grubbström (Grubba) 
/* Emit code to load a pointer to the current object's storage plus
 * 'offset' bytes into 'reg':
 *   fp->current_object->storage + fp->context->storage_offset + offset
 * Clobbers RAX as scratch, so reg must not be P_REG_RAX. */
static void amd64_get_storage( enum amd64_reg reg, ptrdiff_t offset )
{
  amd64_load_fp_reg();
#if 0
  /* Note: We really should keep pike_frame->current_storage up to date
     instead.. */
  mov_mem_reg( fp_reg, OFFSETOF(pike_frame,current_storage), P_REG_RAX );
  add_reg_imm( reg, offset );
#else
  /* fp->current_object->storage */
  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object), P_REG_RAX);
  mov_mem_reg(P_REG_RAX, OFFSETOF(object,storage), reg );

  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), P_REG_RAX);
  /* + fp->context->storage_offset */
  add_reg_mem( reg, P_REG_RAX, OFFSETOF(inherit,storage_offset) );

  /* + offset */
  add_reg_imm( reg, offset);
#endif
}
/* Emit code to push a mark on the mark stack pointing at
 * Pike_sp - offset (offset counted in svalues from the stack top). */
static void amd64_mark(int offset)
{
  amd64_load_sp_reg();
  amd64_load_mark_sp_reg();
  if (offset) {
    /* *mark_sp = sp - offset */
    add_reg_imm_reg(sp_reg, -offset * sizeof(struct svalue), P_REG_RAX);
    mov_reg_mem(P_REG_RAX, mark_sp_reg, 0);
  } else {
    /* *mark_sp = sp */
    mov_reg_mem(sp_reg, mark_sp_reg, 0);
  }
  amd64_add_mark_sp( 1 );
}

/* Emit code to load the (8-bit) type field of the svalue pointed to
 * by 'src' into 'dst'. */
static void mov_sval_type(enum amd64_reg src, enum amd64_reg dst )
{
  mov_mem8_reg( src, OFFSETOF(svalue, tu.t.type), dst);
/*  and_reg32_imm( dst, 0x1f );*/
}

/* Emit a call to the C function at 'addr'. Dirty (cached) interpreter
 * registers are written back first so the callee sees consistent
 * state. */
static void amd64_call_c_function(void *addr)
{
  flush_dirty_regs();
  call_imm(addr);
}

/* Emit code to free the svalue pointed to by 'src': decrement the
 * refcount when the type is ref-counted (MIN_REF_TYPE bit set), and
 * call really_free_svalue() if it drops to zero. When guaranteed_ref
 * is set the caller knows at least one other reference remains, so
 * the zero check is skipped. Clobbers RAX; src must not be RAX. */
static void amd64_free_svalue(enum amd64_reg src, int guaranteed_ref )
{
  LABELS();
  if( src == P_REG_RAX )
    Pike_fatal("Clobbering RAX for free-svalue\n");

  /* load type -> RAX */
  mov_sval_type( src, P_REG_RAX );

  /* Nothing to do for non-ref-counted types. */
  and_reg_imm(P_REG_RAX, MIN_REF_TYPE);
  jz( &label_A );

  /* Load pointer to refs -> RAX */
  mov_mem_reg( src, OFFSETOF(svalue, u.refs), P_REG_RAX);
  /* if( !--*RAX ) */
  /* NB: pike_string is used only for the offset of its 'refs' field;
   * presumably shared by all ref-counted types -- TODO confirm. */
  add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), -1);
  if( !guaranteed_ref )
  {
    /* We need to see if refs got to 0. */
    jnz( &label_A );
    /* if so, call really_free_svalue */
    if( src != ARG1_REG )
      mov_reg_reg( src, ARG1_REG );
    amd64_call_c_function(really_free_svalue);
  }
 LABEL_A;
}
/* Like amd64_free_svalue(), but the svalue type is already loaded in
 * register 'type'. Note: Clobbers the type register (and RAX); src
 * must not be RAX. */
static void amd64_free_svalue_type(enum amd64_reg src, enum amd64_reg type,
                                   int guaranteed_ref )
{
  LABELS();
  /* if type < MIN_REF_TYPE+1 */
  if( src == P_REG_RAX )
    Pike_fatal("Clobbering RAX for free-svalue\n");

  /* Nothing to do for non-ref-counted types. */
  and_reg32_imm(type, MIN_REF_TYPE);
  jz( &label_A );

  /* Load pointer to refs -> RAX */
  mov_mem_reg( src, OFFSETOF(svalue, u.refs), P_REG_RAX);
  /* if( !--*RAX ) */
  add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), -1);
  if( !guaranteed_ref )
  {
    /* We need to see if refs got to 0. */
    jnz( &label_A );
    /* if so, call really_free_svalue */
    if( src != ARG1_REG )
      mov_reg_reg( src, ARG1_REG );
    amd64_call_c_function(really_free_svalue);
  }
 LABEL_A;
}
/* Emit code to add a reference to the svalue pointed to by 'src' when
 * its type is ref-counted. If already_have_type is set, the type word
 * is expected to be in RAX already. Clobbers RAX; src must not be
 * RAX. */
void amd64_ref_svalue( enum amd64_reg src, int already_have_type )
{
  LABELS();
  if( src == P_REG_RAX ) Pike_fatal("Clobbering src in ref_svalue\n");

  if( !already_have_type )
    mov_sval_type( src, P_REG_RAX );

  /* Skip for non-ref-counted types. */
  and_reg32_imm( P_REG_RAX, MIN_REF_TYPE );
  jz( &label_A );

  /* Load pointer to refs -> RAX */
  mov_mem_reg( src, OFFSETOF(svalue, u.refs), P_REG_RAX);
  /* *RAX++ */
  add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), 1);
 LABEL_A;
}
/* Emit code to copy the 16-byte svalue at src+src_offset to *dst.
 * Neither frees dst's old contents nor adds a reference -- presumably
 * the caller handles both (cf. amd64_assign_local()). Clobbers RAX,
 * so neither src nor dst may be RAX. */
void amd64_assign_svalue_no_free( enum amd64_reg dst,
                                  enum amd64_reg src, ptrdiff_t src_offset )
{
  if( dst == P_REG_RAX ||src == P_REG_RAX )
    Pike_fatal( "Clobbering src/dst in amd64_assign_svalue_no_free <%d,%d>\n",
                dst, src);
  /* Copy src -> dst */
  mov_mem_reg(src, src_offset, P_REG_RAX);
  mov_reg_mem(P_REG_RAX, dst, 0 );
  mov_mem_reg(src, src_offset+sizeof(long), P_REG_RAX);
  mov_reg_mem(P_REG_RAX, dst, sizeof(long) );
}
/* Emit code to assign sp[-1] to local variable 'b', freeing the
 * local's old value first. The value is left on the Pike stack. */
void amd64_assign_local( int b )
{
  amd64_load_fp_reg();
  amd64_load_sp_reg();
  /* RBX = &Pike_fp->locals[b] */
  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), P_REG_RBX);
  add_reg_imm( P_REG_RBX, b*sizeof(struct svalue) );
  amd64_free_svalue(P_REG_RBX, 0);
  amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, -1*sizeof(struct svalue));
}

/* Emit code to pop one entry off the mark stack. */
static void amd64_pop_mark(void)
{
  amd64_add_mark_sp( -1 );
}

/* Emit code to push program string #strno (with the given subtype)
 * onto the Pike stack, adding a reference to the string. */
static void amd64_push_string(int strno, int subtype)
{
  amd64_load_fp_reg();
  amd64_load_sp_reg();
  /* RAX = fp->context->prog->strings[strno] */
  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), P_REG_RAX);
  mov_mem_reg(P_REG_RAX, OFFSETOF(inherit, prog), P_REG_RAX);
  mov_mem_reg(P_REG_RAX, OFFSETOF(program, strings), P_REG_RAX);
  mov_mem_reg(P_REG_RAX, strno * sizeof(struct pike_string *), P_REG_RAX);
  mov_imm_mem((subtype<<16) | PIKE_T_STRING, sp_reg,
	      OFFSETOF(svalue, tu.t.type));
  mov_reg_mem(P_REG_RAX, sp_reg,(INT32)OFFSETOF(svalue, u.string));
  add_imm_mem( 1, P_REG_RAX, OFFSETOF(pike_string, refs));
  amd64_add_sp(1);
}
/* Emit code to push function #fun of the current object as a function
 * svalue, adding a reference to the object. The stored subtype is
 * fp->context->identifier_level + fun. */
static void amd64_push_local_function(int fun)
{
  amd64_load_fp_reg();
  amd64_load_sp_reg();
  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), P_REG_RAX);
  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object),
	      P_REG_RCX);
  mov_mem32_reg(P_REG_RAX, OFFSETOF(inherit, identifier_level),
		P_REG_RAX);
  mov_reg_mem(P_REG_RCX, sp_reg, OFFSETOF(svalue, u.object));
  add_reg_imm(P_REG_RAX, fun);
  add_imm_mem( 1, P_REG_RCX,(INT32)OFFSETOF(object, refs));
  /* Type word = ((identifier_level + fun) << 16) | PIKE_T_FUNCTION */
  shl_reg_imm(P_REG_RAX, 16);
  add_reg_imm(P_REG_RAX, PIKE_T_FUNCTION);
  mov_reg_mem(P_REG_RAX, sp_reg, OFFSETOF(svalue, tu.t.type));
  amd64_add_sp(1);
}

/* Emit code to store the current (machine code) PC into
 * Pike_fp->pc, via a RIP-relative lea. amd64_prev_stored_pc tracks
 * the last PIKE_PC for which a store was emitted, so redundant
 * updates can be skipped. */
void amd64_update_pc(void)
{
  INT32 tmp = PIKE_PC, disp;
  const enum amd64_reg tmp_reg = P_REG_RAX;

  if(amd64_prev_stored_pc == - 1)
  {
    /* No PC stored yet in this function: emit a full store. */
    amd64_load_fp_reg();
    mov_rip_imm_reg(tmp - PIKE_PC, tmp_reg);
    mov_reg_mem(tmp_reg, fp_reg, OFFSETOF(pike_frame, pc));
#ifdef PIKE_DEBUG
    if (a_flag >= 60)
      fprintf (stderr, "pc %d update pc via lea\n", tmp);
#endif
    amd64_prev_stored_pc = tmp;
  }
  else if ((disp = tmp - amd64_prev_stored_pc))
  {
#ifdef PIKE_DEBUG
    if (a_flag >= 60)
      fprintf (stderr, "pc %d update pc relative: %d\n", tmp, disp);
#endif
    /* PC has advanced since the last store; store again.
     * (The commented-out variant updated fp->pc with just the
     * displacement instead of a full lea.) */
    amd64_load_fp_reg();
    mov_rip_imm_reg(tmp - PIKE_PC, tmp_reg);
    mov_reg_mem(tmp_reg, fp_reg, OFFSETOF(pike_frame, pc));
    /* amd64_load_fp_reg(); */
    /* add_imm_mem(disp, fp_reg, OFFSETOF (pike_frame, pc)); */
    amd64_prev_stored_pc += disp;
  }
  else {
#ifdef PIKE_DEBUG
    if (a_flag >= 60)
      fprintf (stderr, "pc %d update pc - already up-to-date\n", tmp);
#endif
  }
}
/* Emit a PC store (UPDATE_PC()) when needed: on the first instruction
 * of a new program, when the linenumber info has grown, or when no PC
 * has been stored yet. With PIKE_DEBUG and d_flag, the PC is updated
 * for every instruction. No-op when store_pc is off. */
static void maybe_update_pc(void)
{
  static int last_prog_id=-1;
  static size_t last_num_linenumbers=-1;
  if( !store_pc ) return;

  if(
#ifdef PIKE_DEBUG
    /* Update the pc more often for the sake of the opcode level trace. */
     d_flag ||
#endif
     (amd64_prev_stored_pc == -1) ||
     last_prog_id != Pike_compiler->new_program->id ||
     last_num_linenumbers != Pike_compiler->new_program->num_linenumbers
  ) {
    last_prog_id=Pike_compiler->new_program->id;
    last_num_linenumbers = Pike_compiler->new_program->num_linenumbers;
    UPDATE_PC();
  }
}
/* Emit a load of fp_reg under (nominally) the same conditions that
 * maybe_update_pc() would store the PC, so fp_reg is guaranteed to be
 * loaded wherever a PC store may be emitted. No-op when store_pc is
 * off.
 * NOTE(review): unlike maybe_update_pc(), the static last_* variables
 * here are never assigned, so the condition is effectively always
 * true -- verify whether that is intentionally conservative or an
 * oversight. */
static void maybe_load_fp(void)
{
  static int last_prog_id=-1;
  static size_t last_num_linenumbers=-1;
  if( !store_pc ) return;

  if(
#ifdef PIKE_DEBUG
    /* Update the pc more often for the sake of the opcode level trace. */
     d_flag ||
#endif
     (amd64_prev_stored_pc == -1) ||
     last_prog_id != Pike_compiler->new_program->id ||
     last_num_linenumbers != Pike_compiler->new_program->num_linenumbers
  ) {
    amd64_load_fp_reg();
  }
}
/* Write back cached interpreter state before calling C code, and
 * invalidate the register caches that the callee may change
 * (selected by the I_UPDATE_* bits in flags). */
static void sync_registers(int flags)
{
  maybe_update_pc();
  flush_dirty_regs();

  if (flags & I_UPDATE_SP) sp_reg = P_REG_INVALID;
  if (flags & I_UPDATE_M_SP) mark_sp_reg = P_REG_INVALID;
  if (flags & I_UPDATE_FP) fp_reg = P_REG_INVALID;
}

/* Emit a call to the C implementation of an opcode, syncing and
 * invalidating registers according to 'flags' first. */
static void amd64_call_c_opcode(void *addr, int flags)
{
  sync_registers(flags);
  call_imm( addr );
}
#ifdef PIKE_DEBUG
/* Emit the per-opcode debug prologue: with d_flag set, calls one of
 * simple_debug_instr_prologue_{0,1,2} with the instruction number and
 * its argument(s), depending on the I_HASARG/I_HASARG2 flags. Always
 * ensures fp_reg is loaded (see maybe_load_fp() note below). */
static void ins_debug_instr_prologue (PIKE_INSTR_T instr, INT32 arg1,
				      INT32 arg2)
{
  int flags = instrs[instr].flags;

  /* Note: maybe_update_pc() is called by amd64_call_c_opcode() above,
   * which has the side-effect of loading fp_reg. Some of the
   * opcodes use amd64_call_c_opcode() in conditional segments.
   * This is to make sure that fp_reg is always loaded on exit
   * from such opcodes.
   */
  maybe_load_fp();

/* For now: It is very hard to read the disassembled source when
   this is inserted */
  if( !d_flag )
    return;

  maybe_update_pc();
  if (flags & I_HASARG2)
    mov_imm_reg(arg2, ARG3_REG);
  if (flags & I_HASARG)
    mov_imm_reg(arg1, ARG2_REG);
  mov_imm_reg(instr, ARG1_REG);

  if (flags & I_HASARG2)
    amd64_call_c_function (simple_debug_instr_prologue_2);
  else if (flags & I_HASARG)
    amd64_call_c_function (simple_debug_instr_prologue_1);
  else
    amd64_call_c_function (simple_debug_instr_prologue_0);
}
#else  /* !PIKE_DEBUG */
/* Without PIKE_DEBUG only the fp_reg load side-effect is kept. */
#define ins_debug_instr_prologue(instr, arg1, arg2) maybe_load_fp()
#endif
719c3a2012-06-12Per Hedbor static void amd64_push_this_object( ) { amd64_load_fp_reg(); amd64_load_sp_reg();
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem( PIKE_T_OBJECT, sp_reg, OFFSETOF(svalue, tu.t.type));
a137542013-10-08Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, current_object), P_REG_RAX ); mov_reg_mem( P_REG_RAX, sp_reg, OFFSETOF(svalue,u.object) ); add_mem32_imm( P_REG_RAX, (INT32)OFFSETOF(object, refs), 1);
719c3a2012-06-12Per Hedbor  amd64_add_sp( 1 ); }
/* Pad the code buffer with NOPs (0x90) until PIKE_PC is 4-byte
 * aligned. */
static void amd64_align()
{
  while( PIKE_PC & 3 )
    ib( 0x90 );
}

/* Emit the periodic thread-switch/check trampoline. The shared
 * trampoline (bumping fast_check_threads_counter by 0x80 and jumping
 * to branch_check_threads_etc) is emitted once, the first time this
 * is called; its address is remembered in
 * branch_check_threads_update_etc. When code_only is clear, also emit
 * the inline call site: a counter byte on the C stack is incremented
 * and the trampoline is only called when it overflows. */
static void amd64_ins_branch_check_threads_etc(int code_only)
{
  LABELS();
  if( !branch_check_threads_update_etc )
  {
    /* Create update + call to branch_check_threads_etc */
    if( !code_only )
      jmp( &label_A );
    branch_check_threads_update_etc = PIKE_PC;
    if( (unsigned long long)&fast_check_threads_counter < 0x7fffffffULL )
    {
      /* Short pointer: counter is addressable with a 32-bit absolute
       * displacement (indexed off a zeroed RAX). */
      clear_reg( P_REG_RAX );
      add_mem32_imm( P_REG_RAX,
		     (int)(ptrdiff_t)&fast_check_threads_counter,
		     0x80 );
    }
    else
    {
      /* Full 64-bit address needed. */
      mov_imm_reg( (long)&fast_check_threads_counter, P_REG_RAX);
      add_mem_imm( P_REG_RAX, 0, 0x80 );
    }
    mov_imm_reg( (ptrdiff_t)branch_check_threads_etc, P_REG_RAX );
    jmp_reg(P_REG_RAX); /* ret in BCTE will return to desired point. */
    amd64_align();
  }
  if( !code_only )
  {
   LABEL_A;
    /* Use C-stack for counter. We have padding added in entry */
#ifndef USE_VALGRIND
    add_mem8_imm( P_REG_RSP, 0, 1 );
    jno( &label_B );
#endif
    call_rel_imm32( branch_check_threads_update_etc );
#ifndef USE_VALGRIND
   LABEL_B;
#endif
  }
}
/* One-time interpreter setup for the AMD64 backend: F_CATCH is
 * dispatched through inter_return_opcode_F_CATCH (see the F_CATCH
 * case in ins_f_byte()). */
void amd64_init_interpreter_state(void)
{
  instrs[F_CATCH - F_OFFSET].address = inter_return_opcode_F_CATCH;
}
6785ae2012-06-15Per Hedbor static void amd64_return_from_function() { if( ret_for_func ) {
11619b2012-07-18Henrik Grubbström (Grubba)  jmp_rel_imm( ret_for_func );
6785ae2012-06-15Per Hedbor  } else { ret_for_func = PIKE_PC;
a137542013-10-08Per Hedbor  pop(P_REG_RBX); /* Stack padding. */ pop(P_REG_RBX); pop(P_REG_R12); pop(P_REG_R13); pop(P_REG_R14); pop(P_REG_R15); pop(P_REG_RBP);
6785ae2012-06-15Per Hedbor  ret(); } }
d1fa802011-05-09Henrik Grubbström (Grubba) void ins_f_byte(unsigned int b) {
54a26b2011-05-11Henrik Grubbström (Grubba)  int flags;
d1fa802011-05-09Henrik Grubbström (Grubba)  void *addr;
48f5972011-05-23Henrik Grubbström (Grubba)  INT32 rel_addr = 0;
719c3a2012-06-12Per Hedbor  LABELS();
d1fa802011-05-09Henrik Grubbström (Grubba)  b-=F_OFFSET;
7990392006-04-27Tor Edvardsson #ifdef PIKE_DEBUG
d1fa802011-05-09Henrik Grubbström (Grubba)  if(b>255) Pike_error("Instruction too big %d\n",b);
7990392006-04-27Tor Edvardsson #endif
d1fa802011-05-09Henrik Grubbström (Grubba)  maybe_update_pc();
54a26b2011-05-11Henrik Grubbström (Grubba)  flags = instrs[b].flags;
eb42a12011-05-11Henrik Grubbström (Grubba)  addr=instrs[b].address;
1af0932012-06-22Per Hedbor  switch(b + F_OFFSET) {
b505a72012-06-13Per Hedbor  case F_DUP:
6785ae2012-06-15Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  add_reg_imm_reg(sp_reg, -sizeof(struct svalue), P_REG_R10 ); amd64_push_svaluep( P_REG_R10 );
b505a72012-06-13Per Hedbor  return;
378ad02012-06-25Per Hedbor 
4ceb792012-06-22Per Hedbor #if 0
378ad02012-06-25Per Hedbor  case F_ESCAPE_CATCH:
1af0932012-06-22Per Hedbor  case F_EXIT_CATCH:
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
1af0932012-06-22Per Hedbor  ins_f_byte( F_ESCAPE_CATCH ); amd64_load_sp_reg(); amd64_push_int( 0, 1 ); return;
4ceb792012-06-22Per Hedbor #endif
378ad02012-06-25Per Hedbor  /* -3 -2 -1 0 * lval[0], lval[1], <RANDOM>, **SP** * -> * -4 -3 -2 -1 0 * lval[0], lval[1], *lval, <RANDOM>, *SP** * * This will also free *lval iff it has type * array,multiset,mapping or string. */ case F_LTOSVAL2_AND_FREE: {
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
378ad02012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), P_REG_RBX ); cmp_reg_imm( P_REG_RBX, T_SVALUE_PTR );
378ad02012-06-25Per Hedbor  jne( &label_A ); /* inline version for SVALUE_PTR. */ /* First, make room for the result, move top and inc sp */
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX ); mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, P_REG_RCX ); mov_reg_mem( P_REG_RAX, sp_reg, 0); mov_reg_mem( P_REG_RCX, sp_reg, 8);
378ad02012-06-25Per Hedbor  amd64_add_sp( 1 ); mov_mem_reg( sp_reg, -4*sizeof(struct svalue)+OFFSETOF(svalue,u.lval),
a137542013-10-08Per Hedbor  P_REG_RBX ); amd64_push_svaluep_to(P_REG_RBX, -2);
378ad02012-06-25Per Hedbor  /* Result is now in sp[-2], lval in -4. */ /* push svaluep leaves type in RAX */
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RAX, P_REG_RCX ); /* mov_mem8_reg( sp_reg, -2*sizeof(struct svalue), P_REG_RCX ); */ mov_imm_reg( 1, P_REG_RAX ); shl_reg32_reg( P_REG_RAX, P_REG_RCX ); and_reg_imm( P_REG_RAX, (BIT_ARRAY|BIT_MULTISET|BIT_MAPPING|BIT_STRING));
378ad02012-06-25Per Hedbor  jz( &label_B ); /* Do nothing.. */ /* Free the old value. */
a137542013-10-08Per Hedbor  amd64_free_svalue( P_REG_RBX, 0 );
378ad02012-06-25Per Hedbor  /* assign 0 */
a137542013-10-08Per Hedbor  mov_imm_mem( PIKE_T_INT, P_REG_RBX, 0 ); mov_imm_mem( 0, P_REG_RBX, 8 );
378ad02012-06-25Per Hedbor  jmp( &label_B ); /* All done */ LABEL_A; /* So, not a svalueptr. Use C-version */ amd64_call_c_opcode( addr, flags ); amd64_load_sp_reg(); LABEL_B; } return; case F_LTOSVAL: {
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
378ad02012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -sizeof(struct svalue)*2, P_REG_RAX );
378ad02012-06-25Per Hedbor  /* lval type in RAX. */ /* if( rax == T_SVALUE_PTR ) push( *l->u.lval ) possibly: if( rax == T_OBJECT ) object_index_no_free( to, lval->u.object, lval->subtype, &lval[1]) if( rax == T_ARRAY && lval[1].type == PIKE_T_INT ) push( l->u.array->item[&lval[1].u.integer] ) */
a137542013-10-08Per Hedbor  cmp_reg_imm( P_REG_RAX, T_SVALUE_PTR );
378ad02012-06-25Per Hedbor  je( &label_A ); /* So, not a svalueptr */ mov_reg_reg( sp_reg, ARG1_REG ); add_reg_imm_reg( sp_reg, -2*sizeof(struct svalue), ARG2_REG ); amd64_call_c_function(lvalue_to_svalue_no_free);
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
378ad02012-06-25Per Hedbor  amd64_add_sp(1); jmp(&label_B); LABEL_A; mov_mem_reg( sp_reg, -sizeof(struct svalue)*2+OFFSETOF(svalue,u.lval),
a137542013-10-08Per Hedbor  P_REG_RCX ); amd64_push_svaluep(P_REG_RCX);
378ad02012-06-25Per Hedbor  LABEL_B; } return; case F_ASSIGN: {
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
378ad02012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), P_REG_RAX ); cmp_reg_imm( P_REG_RAX, T_SVALUE_PTR );
378ad02012-06-25Per Hedbor  je( &label_A ); /* So, not a svalueptr. Use C-version for simplicity */ amd64_call_c_opcode( addr, flags ); amd64_load_sp_reg(); jmp( &label_B ); amd64_align(); LABEL_A;
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -3*sizeof(struct svalue)+8, P_REG_RBX );
378ad02012-06-25Per Hedbor  /* Free old value. */
a137542013-10-08Per Hedbor  amd64_free_svalue( P_REG_RBX, 0 );
378ad02012-06-25Per Hedbor  /* assign new value */ /* also move assigned value -> sp[-3] */
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX ); mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, P_REG_RCX ); mov_reg_mem( P_REG_RAX, P_REG_RBX, 0 ); mov_reg_mem( P_REG_RCX, P_REG_RBX, 8 ); add_reg_imm_reg( sp_reg, -sizeof(struct svalue), P_REG_RCX ); amd64_push_svaluep_to( P_REG_RCX, -3 );
378ad02012-06-25Per Hedbor  /* Note: For SVALUEPTR we know we do not have to free the lvalue. */ amd64_add_sp( -2 ); LABEL_B; } return; case F_ASSIGN_AND_POP: {
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
378ad02012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), P_REG_RAX ); cmp_reg_imm( P_REG_RAX, T_SVALUE_PTR );
378ad02012-06-25Per Hedbor  je( &label_A ); /* So, not a svalueptr. Use C-version for simplicity */ amd64_call_c_opcode( addr, flags ); amd64_load_sp_reg(); jmp( &label_B ); amd64_align(); LABEL_A;
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -3*sizeof(struct svalue)+8, P_REG_RBX );
378ad02012-06-25Per Hedbor  /* Free old value. */
a137542013-10-08Per Hedbor  amd64_free_svalue( P_REG_RBX, 0 );
378ad02012-06-25Per Hedbor  /* assign new value */
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX ); mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, P_REG_RCX ); mov_reg_mem( P_REG_RAX, P_REG_RBX, 0 ); mov_reg_mem( P_REG_RCX, P_REG_RBX, 8 );
378ad02012-06-25Per Hedbor  /* Note: For SVALUEPTR we know we do not have to free the lvalue. */ amd64_add_sp( -3 ); LABEL_B; } return; return;
6785ae2012-06-15Per Hedbor  case F_ADD_INTS: { ins_debug_instr_prologue(b, 0, 0);
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
b8709f2014-08-07Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue)*2, P_REG_RAX ); add_reg_mem( P_REG_RAX, sp_reg, -sizeof(struct svalue ) ); #if PIKE_T_INT != 0 #error This code assumes PIKE_T_INT is 0. /* cmp_reg32_imm( P_REG_RAX, 0 ); */ #endif jnz( &label_A );
6785ae2012-06-15Per Hedbor  /* So. Both are actually integers. */ mov_mem_reg( sp_reg, -sizeof(struct svalue)+OFFSETOF(svalue,u.integer),
a137542013-10-08Per Hedbor  P_REG_RAX );
6785ae2012-06-15Per Hedbor 
a137542013-10-08Per Hedbor  add_reg_mem( P_REG_RAX,
6785ae2012-06-15Per Hedbor  sp_reg,
b8709f2014-08-07Per Hedbor  -sizeof(struct svalue)*2+OFFSETOF(svalue,u.integer));
6785ae2012-06-15Per Hedbor  jo( &label_A ); amd64_add_sp( -1 );
b8709f2014-08-07Per Hedbor  /* the type only needs to be set if the subtype was set before. It was not, since the type check before would not have triggered. */
a137542013-10-08Per Hedbor  mov_reg_mem( P_REG_RAX, sp_reg,
6785ae2012-06-15Per Hedbor  -sizeof(struct svalue)+OFFSETOF(svalue,u.integer)); jmp( &label_B ); LABEL_A; /* Fallback version */ update_arg1( 2 ); amd64_call_c_opcode( f_add, I_UPDATE_SP ); amd64_load_sp_reg(); LABEL_B; } return;
b505a72012-06-13Per Hedbor  case F_SWAP:
6785ae2012-06-15Per Hedbor  /* pike_sp[-1] = pike_sp[-2] */
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
6785ae2012-06-15Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  add_reg_imm_reg( sp_reg, -2*sizeof(struct svalue), P_REG_RCX ); mov_mem128_reg( P_REG_RCX, 0, P_REG_XMM0 ); mov_mem128_reg( P_REG_RCX, 16, P_REG_XMM1 ); mov_reg_mem128( P_REG_XMM1, P_REG_RCX, 0 ); mov_reg_mem128( P_REG_XMM0, P_REG_RCX, 16 );
622ebc2012-06-25Per Hedbor #if 0
a137542013-10-08Per Hedbor  add_reg_imm_reg( sp_reg, -2*sizeof(struct svalue), P_REG_R10); mov_mem_reg( P_REG_R10, 0, P_REG_RAX ); mov_mem_reg( P_REG_R10, 8, P_REG_RCX ); mov_mem_reg( P_REG_R10,16, P_REG_R8 ); mov_mem_reg( P_REG_R10,24, P_REG_R9 );
6785ae2012-06-15Per Hedbor  /* load done. */
a137542013-10-08Per Hedbor  mov_reg_mem(P_REG_R8, P_REG_R10,0); mov_reg_mem(P_REG_R9, P_REG_R10,8); mov_reg_mem(P_REG_RAX, P_REG_R10,sizeof(struct svalue)); mov_reg_mem(P_REG_RCX, P_REG_R10,8+sizeof(struct svalue));
6785ae2012-06-15Per Hedbor  /* save done. */
622ebc2012-06-25Per Hedbor #endif
6785ae2012-06-15Per Hedbor  return;
8886032012-06-25Per Hedbor  case F_INDEX: /* pike_sp[-2][pike_sp[-1]] */
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
8886032012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -2*sizeof(struct svalue), P_REG_RAX ); mov_mem8_reg( sp_reg, -1*sizeof(struct svalue), P_REG_RBX ); shl_reg_imm( P_REG_RAX, 8 ); add_reg_reg( P_REG_RAX, P_REG_RBX ); mov_mem_reg( sp_reg, -1*sizeof(struct svalue)+8, P_REG_RBX ); /* int */ mov_mem_reg( sp_reg, -2*sizeof(struct svalue)+8, P_REG_RCX ); /* value */ cmp_reg32_imm( P_REG_RAX, (PIKE_T_ARRAY<<8)|PIKE_T_INT );
8886032012-06-25Per Hedbor  jne( &label_A ); /* Array and int index. */
a137542013-10-08Per Hedbor  mov_mem32_reg( P_REG_RCX, OFFSETOF(array,size), P_REG_RDX ); cmp_reg32_imm( P_REG_RBX, 0 ); jge( &label_D );
8886032012-06-25Per Hedbor  /* less than 0, add size */
a137542013-10-08Per Hedbor  add_reg_reg( P_REG_RBX, P_REG_RDX );
8886032012-06-25Per Hedbor  LABEL_D;
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RBX, 0 ); jl( &label_B ); // <0 cmp_reg_reg( P_REG_RBX, P_REG_RDX); jge( &label_B ); // >size
8886032012-06-25Per Hedbor  /* array, index inside array. push item, swap, pop, done */
a137542013-10-08Per Hedbor  push( P_REG_RCX ); /* Save array pointer */ mov_mem_reg( P_REG_RCX, OFFSETOF(array,item), P_REG_RCX ); shl_reg_imm( P_REG_RBX, 4 ); add_reg_reg( P_REG_RBX, P_REG_RCX );
8886032012-06-25Per Hedbor  /* This overwrites the array. */ amd64_add_sp( -1 );
a137542013-10-08Per Hedbor  amd64_push_svaluep_to( P_REG_RBX, -1 );
8886032012-06-25Per Hedbor 
a137542013-10-08Per Hedbor  pop( P_REG_RCX );
8886032012-06-25Per Hedbor  /* We know it's an array. */
a137542013-10-08Per Hedbor  add_mem32_imm( P_REG_RCX, OFFSETOF(array,refs), -1);
8886032012-06-25Per Hedbor  jnz( &label_C );
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RCX, ARG1_REG );
8886032012-06-25Per Hedbor  amd64_call_c_function(really_free_array); jmp( &label_C ); LABEL_A; #if 0
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, (PIKE_T_STRING<<8)|PIKE_T_INT );
8886032012-06-25Per Hedbor  jne( &label_B ); #endif LABEL_B; /* something else. */ amd64_call_c_opcode( addr, flags ); amd64_load_sp_reg(); LABEL_C; /* done */ return;
ea82682012-06-25Per Hedbor  case F_SIZEOF: { LABELS(); ins_debug_instr_prologue(b, 0, 0); amd64_load_sp_reg();
a137542013-10-08Per Hedbor  add_reg_imm_reg( sp_reg, -sizeof(struct svalue), P_REG_RBX); mov_sval_type( P_REG_RBX, P_REG_RAX );
ea82682012-06-25Per Hedbor  /* type in RAX, svalue in ARG1 */
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, PIKE_T_ARRAY ); jne( &label_A );
ea82682012-06-25Per Hedbor  /* It's an array */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RBX, OFFSETOF(svalue, u.array ), ARG1_REG);
ea82682012-06-25Per Hedbor  /* load size -> RAX*/
a137542013-10-08Per Hedbor  mov_mem32_reg( ARG1_REG,OFFSETOF(array, size), P_REG_RAX );
ea82682012-06-25Per Hedbor  jmp( &label_C ); LABEL_A;
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, PIKE_T_STRING ); jne( &label_B );
ea82682012-06-25Per Hedbor  /* It's a string */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RBX, OFFSETOF(svalue, u.string ), ARG1_REG);
ea82682012-06-25Per Hedbor  /* load size ->RAX*/
a137542013-10-08Per Hedbor  mov_mem32_reg( ARG1_REG,OFFSETOF(pike_string, len ), P_REG_RAX );
ea82682012-06-25Per Hedbor  jmp( &label_C ); LABEL_B; /* It's something else, svalue in RBX. */
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RBX, ARG1_REG );
ea82682012-06-25Per Hedbor  amd64_call_c_function( pike_sizeof );
8886032012-06-25Per Hedbor 
ea82682012-06-25Per Hedbor  LABEL_C;/* all done, res in RAX */ /* free value, store result */
a137542013-10-08Per Hedbor  push( P_REG_RAX ); amd64_free_svalue( P_REG_RBX, 0 ); pop( P_REG_RAX ); mov_reg_mem(P_REG_RAX, P_REG_RBX, OFFSETOF(svalue, u.integer));
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem(PIKE_T_INT, P_REG_RBX, OFFSETOF(svalue, tu.t.type));
ea82682012-06-25Per Hedbor  } return;
b505a72012-06-13Per Hedbor  case F_POP_VALUE:
6785ae2012-06-15Per Hedbor  { ins_debug_instr_prologue(b, 0, 0); amd64_load_sp_reg(); amd64_add_sp( -1 ); amd64_free_svalue( sp_reg, 0 ); } return;
eb42a12011-05-11Henrik Grubbström (Grubba)  case F_CATCH:
21b9112011-05-23Henrik Grubbström (Grubba)  { /* Special argument for the F_CATCH instruction. */ addr = inter_return_opcode_F_CATCH;
719c3a2012-06-12Per Hedbor  mov_rip_imm_reg(0, ARG1_REG); /* Address for the POINTER. */
48f5972011-05-23Henrik Grubbström (Grubba)  rel_addr = PIKE_PC;
21b9112011-05-23Henrik Grubbström (Grubba)  }
eb42a12011-05-11Henrik Grubbström (Grubba)  break;
1af0932012-06-22Per Hedbor  case F_ZERO_TYPE: { LABELS();
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
1af0932012-06-22Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem32_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX );
1af0932012-06-22Per Hedbor  /* Rax now has type + subtype. */
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RAX, P_REG_RBX ); and_reg_imm( P_REG_RAX, 0x1f ); cmp_reg32_imm( P_REG_RAX, PIKE_T_INT );
1af0932012-06-22Per Hedbor  jne( &label_A ); /* It is an integer. */
a137542013-10-08Per Hedbor  shr_reg_imm( P_REG_RBX, 16 );
1af0932012-06-22Per Hedbor  /* subtype in RBX. */ mov_imm_mem( PIKE_T_INT, sp_reg, -sizeof(struct svalue) );
a137542013-10-08Per Hedbor  mov_reg_mem( P_REG_RBX, sp_reg,
1af0932012-06-22Per Hedbor  -sizeof(struct svalue)+OFFSETOF(svalue,u.integer) ); jmp( &label_B ); LABEL_A; /* not an integer. Use C version for simplicitly.. */ amd64_call_c_opcode( addr, flags ); LABEL_B; } return;
eb42a12011-05-11Henrik Grubbström (Grubba)  case F_UNDEFINED: ins_debug_instr_prologue(b, 0, 0); amd64_push_int(0, 1); return; case F_CONST0: ins_debug_instr_prologue(b, 0, 0); amd64_push_int(0, 0); return; case F_CONST1: ins_debug_instr_prologue(b, 0, 0); amd64_push_int(1, 0); return; case F_CONST_1: ins_debug_instr_prologue(b, 0, 0); amd64_push_int(-1, 0); return; case F_BIGNUM: ins_debug_instr_prologue(b, 0, 0); amd64_push_int(0x7fffffff, 0); return; case F_RETURN_1: ins_f_byte(F_CONST1); ins_f_byte(F_RETURN); return; case F_RETURN_0: ins_f_byte(F_CONST0); ins_f_byte(F_RETURN); return; case F_ADD: ins_debug_instr_prologue(b, 0, 0); update_arg1(2); addr = f_add; break;
ab7cf52011-05-24Henrik Grubbström (Grubba)  case F_MARK: case F_SYNCH_MARK: ins_debug_instr_prologue(b, 0, 0); amd64_mark(0); return; case F_MARK2: ins_f_byte(F_MARK); ins_f_byte(F_MARK); return;
91080b2012-06-15Henrik Grubbström (Grubba)  case F_MARK_AND_CONST0: ins_f_byte(F_MARK); ins_f_byte(F_CONST0); return; case F_MARK_AND_CONST1: ins_f_byte(F_MARK); ins_f_byte(F_CONST1); return;
ab7cf52011-05-24Henrik Grubbström (Grubba)  case F_POP_MARK: ins_debug_instr_prologue(b, 0, 0); amd64_pop_mark(); return;
9853bd2012-06-13Henrik Grubbström (Grubba)  case F_POP_TO_MARK: ins_debug_instr_prologue(b, 0, 0); amd64_load_mark_sp_reg(); amd64_load_sp_reg(); amd64_pop_mark();
a137542013-10-08Per Hedbor  mov_mem_reg(mark_sp_reg, 0, P_REG_RBX);
9853bd2012-06-13Henrik Grubbström (Grubba)  jmp(&label_A); LABEL_B; amd64_add_sp( -1 ); amd64_free_svalue( sp_reg, 0 );
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
9853bd2012-06-13Henrik Grubbström (Grubba)  LABEL_A;
a137542013-10-08Per Hedbor  cmp_reg_reg(P_REG_RBX, sp_reg);
9853bd2012-06-13Henrik Grubbström (Grubba)  jl(&label_B); return;
6785ae2012-06-15Per Hedbor  /* If we are compiling with debug, F_RETURN does extra checks */
622ebc2012-06-25Per Hedbor 
6785ae2012-06-15Per Hedbor  case F_RETURN: case F_DUMB_RETURN: { LABELS();
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(b, 0, 0);
6785ae2012-06-15Per Hedbor  amd64_load_fp_reg(); /* Note: really mem16, but we & with PIKE_FRAME_RETURN_INTERNAL anyway */
a137542013-10-08Per Hedbor  mov_mem32_reg( fp_reg, OFFSETOF(pike_frame, flags), P_REG_RAX ); and_reg_imm( P_REG_RAX, PIKE_FRAME_RETURN_INTERNAL);
6785ae2012-06-15Per Hedbor  jnz( &label_A ); /* So, it is just a normal return. */ LABEL_B; /* Actually return */ flush_dirty_regs(); amd64_return_from_function();
50ced12013-06-12Per Hedbor  /* */
6785ae2012-06-15Per Hedbor  LABEL_A;
50ced12013-06-12Per Hedbor  /* We should jump to the given address. */
a137542013-10-08Per Hedbor  mov_mem32_reg( fp_reg, OFFSETOF(pike_frame, flags), P_REG_RAX ); and_reg_imm( P_REG_RAX, PIKE_FRAME_RETURN_POP );
50ced12013-06-12Per Hedbor  jnz( &label_C ); amd64_call_c_function( low_return ); jmp( &label_D );
6785ae2012-06-15Per Hedbor 
50ced12013-06-12Per Hedbor  LABEL_C; amd64_call_c_function( low_return_pop ); LABEL_D; fp_reg = -1; amd64_load_fp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, return_addr), P_REG_RAX ); jmp_reg( P_REG_RAX );
6785ae2012-06-15Per Hedbor  } return;
50ced12013-06-12Per Hedbor 
91080b2012-06-15Henrik Grubbström (Grubba)  case F_CLEAR_STRING_SUBTYPE: ins_debug_instr_prologue(b, 0, 0); amd64_load_sp_reg();
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_mem32_reg(sp_reg, OFFSETOF(svalue, tu.t.type) - sizeof(struct svalue),
a137542013-10-08Per Hedbor  P_REG_RAX);
91080b2012-06-15Henrik Grubbström (Grubba)  /* NB: We only care about subtype 1! */
a137542013-10-08Per Hedbor  cmp_reg32_imm(P_REG_RAX, (1<<16)|PIKE_T_STRING);
91080b2012-06-15Henrik Grubbström (Grubba)  jne(&label_A);
a137542013-10-08Per Hedbor  and_reg_imm(P_REG_RAX, 0x1f); mov_reg_mem32(P_REG_RAX,
73d2d42014-06-24Henrik Grubbström (Grubba)  sp_reg, OFFSETOF(svalue, tu.t.type) - sizeof(struct svalue));
91080b2012-06-15Henrik Grubbström (Grubba)  LABEL_A; return;
eb42a12011-05-11Henrik Grubbström (Grubba)  }
719c3a2012-06-12Per Hedbor  amd64_call_c_opcode(addr,flags);
54a26b2011-05-11Henrik Grubbström (Grubba)  if (instrs[b].flags & I_RETURN) {
719c3a2012-06-12Per Hedbor  LABELS();
54a26b2011-05-11Henrik Grubbström (Grubba)  if ((b + F_OFFSET) == F_RETURN_IF_TRUE) { /* Kludge. We must check if the ret addr is
719c3a2012-06-12Per Hedbor  * PC + JUMP_EPILOGUE_SIZE. */
a137542013-10-08Per Hedbor  mov_rip_imm_reg(JUMP_EPILOGUE_SIZE, P_REG_RCX);
54a26b2011-05-11Henrik Grubbström (Grubba)  }
a137542013-10-08Per Hedbor  cmp_reg_imm(P_REG_RAX, -1);
6785ae2012-06-15Per Hedbor  jne(&label_A); amd64_return_from_function();
719c3a2012-06-12Per Hedbor  LABEL_A;
54a26b2011-05-11Henrik Grubbström (Grubba)  if ((b + F_OFFSET) == F_RETURN_IF_TRUE) { /* Kludge. We must check if the ret addr is * orig_addr + JUMP_EPILOGUE_SIZE. */
a137542013-10-08Per Hedbor  cmp_reg_reg( P_REG_RAX, P_REG_RCX );
15e8a02012-06-13Henrik Grubbström (Grubba)  je( &label_B );
a137542013-10-08Per Hedbor  jmp_reg(P_REG_RAX);
15e8a02012-06-13Henrik Grubbström (Grubba)  LABEL_B;
54a26b2011-05-11Henrik Grubbström (Grubba)  return; } }
f784252011-05-11Henrik Grubbström (Grubba)  if (flags & I_JUMP) {
a137542013-10-08Per Hedbor  jmp_reg(P_REG_RAX);
48f5972011-05-23Henrik Grubbström (Grubba)  if (b + F_OFFSET == F_CATCH) { upd_pointer(rel_addr - 4, PIKE_PC - rel_addr); }
d1fa802011-05-09Henrik Grubbström (Grubba)  }
7990392006-04-27Tor Edvardsson }
/* Emit AMD64 machine code for the branch opcode 'op' (an F_* instruction
 * carrying the I_BRANCH flag).
 *
 * Returns the program offset of a 32-bit relative-jump placeholder
 * (pushed as 0 via PUSH_INT(0) / jz_imm_rel32(0) / jnz_imm_rel32(0))
 * that the caller is expected to patch with the real branch target
 * later, or -1 when 'op' is not a branch instruction.
 *
 * 'backward_jump' is nonzero for branches that may jump backwards;
 * those paths additionally emit amd64_ins_branch_check_threads_etc(0)
 * before the jump (visible both in START_JUMP() and in the generic
 * fallback at the bottom).
 * NOTE(review): presumably this services thread switching / signal
 * checks on loop back-edges — confirm against the interpreter core. */
54a26b2011-05-11Henrik Grubbström (Grubba) int amd64_ins_f_jump(unsigned int op, int backward_jump) { int flags; void *addr; int off = op - F_OFFSET; int ret = -1;
b505a72012-06-13Per Hedbor  LABELS();
54a26b2011-05-11Henrik Grubbström (Grubba) #ifdef PIKE_DEBUG if(off>255) Pike_error("Instruction too big %d\n",off); #endif flags = instrs[off].flags; if (!(flags & I_BRANCH)) return -1;
6785ae2012-06-15Per Hedbor #define START_JUMP() do{ \ ins_debug_instr_prologue(off, 0, 0); \ if (backward_jump) { \ maybe_update_pc(); \
638a2c2012-07-16Per Hedbor  amd64_ins_branch_check_threads_etc(0); \
6785ae2012-06-15Per Hedbor  } \ } while(0)
b505a72012-06-13Per Hedbor  switch( op ) {
/* Inline fast path: pop one svalue and test only its raw value word at
 * offset 8 (u.integer).  The type word is never inspected here —
 * assumes the compiler only emits the QUICK variants for values known
 * to need no type check or freeing (TODO confirm that contract). */
99c1e92012-06-20Per Hedbor  case F_QUICK_BRANCH_WHEN_ZERO: case F_QUICK_BRANCH_WHEN_NON_ZERO: START_JUMP(); amd64_load_sp_reg(); amd64_add_sp( -1 );
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, 8, P_REG_RAX ); test_reg(P_REG_RAX);
99c1e92012-06-20Per Hedbor  if( op == F_QUICK_BRANCH_WHEN_ZERO ) return jz_imm_rel32(0); return jnz_imm_rel32(0);
4ceb792012-06-22Per Hedbor 
/* Full truth test of sp[-1].  Three-way split on the type byte:
 * PIKE_T_INT -> load u.integer directly (label_C); function/object ->
 * call svalue_is_true() in C (label_A); every other type (string,
 * array, mapping, float) is always true, so just free it and load a
 * constant 1.  The boolean result is accumulated in RBX and tested at
 * label_B. */
99c1e92012-06-20Per Hedbor  case F_BRANCH_WHEN_ZERO: case F_BRANCH_WHEN_NON_ZERO: START_JUMP(); amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -sizeof(struct svalue), P_REG_RCX ); cmp_reg32_imm( P_REG_RCX, PIKE_T_INT ); je( &label_C ); mov_imm_reg( 1, P_REG_RAX ); shl_reg32_reg( P_REG_RAX, P_REG_RCX ); and_reg32_imm( P_REG_RAX, BIT_FUNCTION|BIT_OBJECT );
378ad02012-06-25Per Hedbor  jnz( &label_A ); /* string, array, mapping or float. Always true */ amd64_add_sp(-1);
a137542013-10-08Per Hedbor  amd64_free_svalue_type( sp_reg, P_REG_RCX, 0 ); mov_imm_reg( 1, P_REG_RBX );
378ad02012-06-25Per Hedbor  jmp( &label_B );
4ceb792012-06-22Per Hedbor  LABEL_A;
378ad02012-06-25Per Hedbor  /* function or object. Use svalue_is_true. */
4ceb792012-06-22Per Hedbor  add_reg_imm_reg(sp_reg, -sizeof(struct svalue), ARG1_REG ); amd64_call_c_function(svalue_is_true);
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RAX, P_REG_RBX );
378ad02012-06-25Per Hedbor  amd64_add_sp( -1 ); amd64_free_svalue( sp_reg, 0 ); /* Pop the stack. */
99c1e92012-06-20Per Hedbor  jmp( &label_B );
4ceb792012-06-22Per Hedbor  LABEL_C; /* integer */ mov_mem_reg( sp_reg, -sizeof(struct svalue)+OFFSETOF(svalue,u.integer),
a137542013-10-08Per Hedbor  P_REG_RBX );
4ceb792012-06-22Per Hedbor  amd64_add_sp(-1);
99c1e92012-06-20Per Hedbor 
4ceb792012-06-22Per Hedbor  LABEL_B; /* Branch or not? */
a137542013-10-08Per Hedbor  test_reg( P_REG_RBX );
99c1e92012-06-20Per Hedbor  if( op == F_BRANCH_WHEN_ZERO ) return jz_imm_rel32(0); return jnz_imm_rel32(0);
4ceb792012-06-22Per Hedbor 
/* Iterate the array at sp[-4] using the counter at sp[-1].  If the
 * counter equals array->size the loop is done (RBX forced to 0 at
 * label_A); otherwise the counter is bumped, the current item is
 * assigned to the lvalue at sp[-3..-2] (inline when the lvalue is a
 * T_SVALUE_PTR, via assign_lvalue() in C otherwise), and the nonzero
 * item pointer left in RBX makes the jnz take the back-branch. */
fdb2142012-06-25Per Hedbor  case F_FOREACH:
c33ce92012-06-28Per Hedbor  START_JUMP();
fdb2142012-06-25Per Hedbor  /* -4: array -3: lvalue[0] -2: lvalue[1] -1: counter */ amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -4*sizeof(struct svalue), P_REG_RBX ); cmp_reg32_imm( P_REG_RBX, PIKE_T_ARRAY );
6e714a2013-07-06Jonas Walldén  je(&label_D); /* Bad arg 1. Let the C opcode throw the error. */ amd64_call_c_opcode(instrs[off].address, flags); /* NOT_REACHED */ LABEL_D;
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -1*sizeof(struct svalue)+8, P_REG_RAX ); mov_mem_reg( sp_reg, -4*sizeof(struct svalue)+8, P_REG_RBX ); mov_mem32_reg( P_REG_RBX, OFFSETOF(array,size), P_REG_RCX ); cmp_reg_reg( P_REG_RAX, P_REG_RCX );
fdb2142012-06-25Per Hedbor  je(&label_A); /* increase counter */
638a2c2012-07-16Per Hedbor  add_mem_imm( sp_reg, -1*(int)sizeof(struct svalue)+8, 1 );
fdb2142012-06-25Per Hedbor  /* get item */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RBX, OFFSETOF(array,item), P_REG_RBX ); shl_reg_imm( P_REG_RAX, 4 ); add_reg_reg( P_REG_RBX, P_REG_RAX );
fdb2142012-06-25Per Hedbor 
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -3*sizeof(struct svalue), P_REG_RAX ); cmp_reg_imm( P_REG_RAX,T_SVALUE_PTR);
fdb2142012-06-25Per Hedbor  jne( &label_C ); /* SVALUE_PTR optimization */
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -3*sizeof(struct svalue)+8, P_REG_RDX ); push( P_REG_RDX );
fdb2142012-06-25Per Hedbor  /* Free old value. */
a137542013-10-08Per Hedbor  amd64_free_svalue( P_REG_RDX, 0 ); pop( P_REG_RDX );
fdb2142012-06-25Per Hedbor  /* Assign new value. */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RBX, 0, P_REG_RAX ); mov_mem_reg( P_REG_RBX, 8, P_REG_RCX ); mov_reg_mem( P_REG_RAX, P_REG_RDX, 0 ); mov_reg_mem( P_REG_RCX, P_REG_RDX, 8 );
fdb2142012-06-25Per Hedbor  /* inc refs? */
023b792014-08-08Per Hedbor  and_reg32_imm( P_REG_RAX, MIN_REF_TYPE ); jz( &label_B );
a137542013-10-08Per Hedbor  add_imm_mem( 1, P_REG_RCX, OFFSETOF(pike_string, refs));
fdb2142012-06-25Per Hedbor  jmp( &label_B ); LABEL_C; add_reg_imm_reg( sp_reg, -3*sizeof(struct svalue), ARG1_REG );
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RBX, ARG2_REG );
fdb2142012-06-25Per Hedbor  amd64_call_c_function( assign_lvalue ); jmp(&label_B); LABEL_A;
a137542013-10-08Per Hedbor  mov_imm_reg( 0, P_REG_RBX );
fdb2142012-06-25Per Hedbor  LABEL_B;
a137542013-10-08Per Hedbor  test_reg(P_REG_RBX);
fdb2142012-06-25Per Hedbor  return jnz_imm_rel32(0);
/* Decrement the integer counter at sp[-1]; branch (back to the loop
 * head) while it stays nonzero, pop it when it reaches 0.  Non-integer
 * counters fall back to the C implementation of F_LOOP, whose result
 * arrives in RAX. */
b505a72012-06-13Per Hedbor  case F_LOOP:
c33ce92012-06-28Per Hedbor  START_JUMP();
6785ae2012-06-15Per Hedbor  /* counter in pike_sp-1 */ /* decrement until 0. */ /* if not 0, branch */ /* otherwise, pop */ amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem32_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX );
6785ae2012-06-15Per Hedbor  /* Is it a normal integer? subtype -> 0, type -> PIKE_T_INT */
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, PIKE_T_INT );
6785ae2012-06-15Per Hedbor  jne( &label_A ); /* if it is, is it 0? */
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, P_REG_RAX ); test_reg(P_REG_RAX);
6785ae2012-06-15Per Hedbor  jz( &label_B ); /* it is. */
a137542013-10-08Per Hedbor  add_reg_imm( P_REG_RAX, -1 ); mov_reg_mem( P_REG_RAX, sp_reg, -sizeof(struct svalue)+8); mov_imm_reg( 1, P_REG_RAX );
6785ae2012-06-15Per Hedbor  /* decremented. Jump -> true. */
1af0932012-06-22Per Hedbor  /* This is where we would really like to have two instances of * the target returned from this function... */
6785ae2012-06-15Per Hedbor  jmp( &label_C );
b505a72012-06-13Per Hedbor  LABEL_A; /* Not an integer. */
6785ae2012-06-15Per Hedbor  amd64_call_c_opcode(instrs[F_LOOP-F_OFFSET].address, instrs[F_LOOP-F_OFFSET].flags ); jmp( &label_C );
b505a72012-06-13Per Hedbor 
6785ae2012-06-15Per Hedbor  /* result in RAX */
b505a72012-06-13Per Hedbor  LABEL_B; /* loop done, inline. Known to be int, and 0 */
6785ae2012-06-15Per Hedbor  amd64_add_sp( -1 );
a137542013-10-08Per Hedbor  mov_imm_reg(0, P_REG_RAX );
b505a72012-06-13Per Hedbor  LABEL_C; /* Branch or not? */
a137542013-10-08Per Hedbor  test_reg( P_REG_RAX );
/* Compare sp[-2] with sp[-1], popping both.  Inline path only when the
 * two type words match and are none of function/object/float (those
 * need real comparison semantics in C).  RBX ends up 0 iff equal;
 * matching ref-counted values get an inlined refcount decrement, with
 * really_free_svalue() called only when a count hits zero. */
6785ae2012-06-15Per Hedbor  return jnz_imm_rel32(0); case F_BRANCH_WHEN_EQ: /* sp[-2] != sp[-1] */ case F_BRANCH_WHEN_NE: /* sp[-2] != sp[-1] */ /* START_JUMP();*/
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(op, 0, 0);
6785ae2012-06-15Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem16_reg( sp_reg, -sizeof(struct svalue), P_REG_RCX ); mov_mem16_reg( sp_reg, -sizeof(struct svalue)*2,P_REG_RBX ); cmp_reg_reg( P_REG_RCX, P_REG_RBX );
6785ae2012-06-15Per Hedbor  jnz( &label_A ); /* Types differ */
b016602012-07-19Jonas Walldén  /* Fallback to C for functions, objects and floats. */
a137542013-10-08Per Hedbor  mov_imm_reg(1, P_REG_RBX); shl_reg32_reg(P_REG_RBX, P_REG_RCX); and_reg_imm(P_REG_RBX, (BIT_FUNCTION|BIT_OBJECT|BIT_FLOAT));
b016602012-07-19Jonas Walldén  jnz( &label_A );
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -sizeof(struct svalue)+8, P_REG_RBX ); sub_reg_mem( P_REG_RBX, sp_reg, -sizeof(struct svalue)*2+8);
6785ae2012-06-15Per Hedbor  /* RBX will now be 0 if they are equal.*/ /* Optimization: The types are equal, pop_stack can be greatly
b016602012-07-19Jonas Walldén  * simplified if they are <= max_ref_type */
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RCX,MIN_REF_TYPE);
de91672013-06-12Henrik Grubbström (Grubba)  jge( &label_B );
b016602012-07-19Jonas Walldén  /* cheap pop. We know that both are > max_ref_type */
6785ae2012-06-15Per Hedbor  amd64_add_sp( -2 ); jmp( &label_D ); LABEL_A; /* Fallback - call opcode. */ amd64_call_c_opcode( instrs[F_BRANCH_WHEN_NE-F_OFFSET].address, instrs[F_BRANCH_WHEN_NE-F_OFFSET].flags ); amd64_load_sp_reg(); /* Opcode returns 0 if equal, -1 if not. */
a137542013-10-08Per Hedbor  mov_reg_reg(P_REG_RAX, P_REG_RBX);
6785ae2012-06-15Per Hedbor  jmp(&label_D); LABEL_B; /* comparison done, pop stack x2, reftypes */ amd64_add_sp( -2 );
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, OFFSETOF(svalue,u.refs), P_REG_RAX ); add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), -1 );
6785ae2012-06-15Per Hedbor  jnz( &label_C ); /* Free sp_reg */ mov_reg_reg( sp_reg, ARG1_REG ); amd64_call_c_function( really_free_svalue );
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
6785ae2012-06-15Per Hedbor  LABEL_C; add_reg_imm_reg( sp_reg, sizeof(struct svalue), ARG1_REG );
a137542013-10-08Per Hedbor  mov_mem_reg( ARG1_REG, OFFSETOF(svalue,u.refs), P_REG_RAX ); add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), -1 );
6785ae2012-06-15Per Hedbor  jnz( &label_D ); amd64_call_c_function( really_free_svalue ); /* free sp[-2] */ LABEL_D;
a137542013-10-08Per Hedbor  test_reg(P_REG_RBX);
/* Unconditional F_BRANCH emits a raw 0xe9 (jmp rel32) with a zero
 * placeholder and returns its patch offset directly. */
6785ae2012-06-15Per Hedbor  if( op == F_BRANCH_WHEN_EQ ) return jz_imm_rel32(0); return jnz_imm_rel32(0); #if 0 case F_BRANCH_WHEN_LT: /* sp[-2] < sp[-1] */ case F_BRANCH_WHEN_GE: /* sp[-2] >= sp[-1] */ case F_BRANCH_WHEN_GT: /* sp[-2] > sp[-1] */ case F_BRANCH_WHEN_LE: /* sp[-2] <= sp[-1] */ #endif case F_BRANCH: START_JUMP(); add_to_program(0xe9); ret=DO_NOT_WARN( (INT32) PIKE_PC ); PUSH_INT(0); return ret;
54a26b2011-05-11Henrik Grubbström (Grubba)  }
/* Generic fallback: no inline case matched above.  Call the opcode's C
 * implementation; it leaves its take-the-branch decision in RAX, which
 * is tested here.  For backward jumps a short jz rel8 (0x74) skips over
 * the thread-check call + jmp rel32, and the rel8 displacement is
 * back-patched once the jump's size is known. */
719c3a2012-06-12Per Hedbor  maybe_update_pc();
54a26b2011-05-11Henrik Grubbström (Grubba)  addr=instrs[off].address;
719c3a2012-06-12Per Hedbor  amd64_call_c_opcode(addr, flags);
622ebc2012-06-25Per Hedbor 
6785ae2012-06-15Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  test_reg(P_REG_RAX);
54a26b2011-05-11Henrik Grubbström (Grubba)  if (backward_jump) {
48f5972011-05-23Henrik Grubbström (Grubba)  INT32 skip;
54a26b2011-05-11Henrik Grubbström (Grubba)  add_to_program (0x74); /* jz rel8 */
48f5972011-05-23Henrik Grubbström (Grubba)  add_to_program (0); /* Bytes to skip. */ skip = (INT32)PIKE_PC;
638a2c2012-07-16Per Hedbor  amd64_ins_branch_check_threads_etc(0);
54a26b2011-05-11Henrik Grubbström (Grubba)  add_to_program (0xe9); /* jmp rel32 */ ret = DO_NOT_WARN ((INT32) PIKE_PC);
48f5972011-05-23Henrik Grubbström (Grubba)  PUSH_INT (0); /* Adjust the skip for the relative jump. */ Pike_compiler->new_program->program[skip-1] = ((INT32)PIKE_PC - skip);
54a26b2011-05-11Henrik Grubbström (Grubba)  } else { add_to_program (0x0f); /* jnz rel32 */ add_to_program (0x85); ret = DO_NOT_WARN ((INT32) PIKE_PC); PUSH_INT (0); } return ret; }
d1fa802011-05-09Henrik Grubbström (Grubba) void ins_f_byte_with_arg(unsigned int a, INT32 b) { maybe_update_pc();
eb42a12011-05-11Henrik Grubbström (Grubba)  switch(a) {
719c3a2012-06-12Per Hedbor  case F_THIS_OBJECT: if( b == 0 ) {
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
719c3a2012-06-12Per Hedbor  amd64_push_this_object(); return; }
1c1a242012-06-13Henrik Grubbström (Grubba)  break; /* Fallback to C-version. */
6785ae2012-06-15Per Hedbor 
ea82682012-06-25Per Hedbor  case F_RETURN_LOCAL: /* FIXME: The C version has a trick: if locals+b < expendibles, pop to there and return. This saves a push, and the poping has to be done anyway. */ ins_f_byte_with_arg( F_LOCAL, b ); ins_f_byte( F_DUMB_RETURN ); return;
8886032012-06-25Per Hedbor  case F_NEG_INT_INDEX: { LABELS();
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
8886032012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -1*sizeof(struct svalue), P_REG_RAX ); cmp_reg32_imm( P_REG_RAX, PIKE_T_ARRAY );
8886032012-06-25Per Hedbor  jne( &label_A );
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -1*sizeof(struct svalue)+8, P_REG_RDX ); /* u.array */
8886032012-06-25Per Hedbor  /* -> arr[sizeof(arr)-b] */
a137542013-10-08Per Hedbor  mov_mem32_reg( P_REG_RDX, OFFSETOF(array,size), P_REG_RBX ); sub_reg_imm( P_REG_RBX, b );
8886032012-06-25Per Hedbor  js( &label_A ); /* if signed result, index outside array */
a137542013-10-08Per Hedbor  shl_reg_imm( P_REG_RBX, 4 ); add_reg_mem( P_REG_RBX, P_REG_RDX, OFFSETOF(array,item) );
8886032012-06-25Per Hedbor  /* This overwrites the array. */
a137542013-10-08Per Hedbor  amd64_push_svaluep_to( P_REG_RBX, -1 );
8886032012-06-25Per Hedbor  /* We know it's an array. */
a137542013-10-08Per Hedbor  add_mem32_imm( P_REG_RDX, OFFSETOF(array,refs), -1);
8886032012-06-25Per Hedbor  jnz( &label_C );
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RDX, ARG1_REG );
8886032012-06-25Per Hedbor  amd64_call_c_function(really_free_array); jmp( &label_C ); LABEL_A; update_arg1( b ); amd64_call_c_opcode(instrs[a-F_OFFSET].address, instrs[a-F_OFFSET].flags); LABEL_C; } return; case F_POS_INT_INDEX: { LABELS();
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
8886032012-06-25Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem8_reg( sp_reg, -1*sizeof(struct svalue), P_REG_RAX ); cmp_reg32_imm( P_REG_RAX, PIKE_T_ARRAY );
8886032012-06-25Per Hedbor  jne( &label_A );
a137542013-10-08Per Hedbor  mov_mem_reg( sp_reg, -1*sizeof(struct svalue)+8, P_REG_RDX ); /* u.array */
8886032012-06-25Per Hedbor  /* -> arr[sizeof(arr)-b] */
a137542013-10-08Per Hedbor  mov_mem32_reg( P_REG_RDX, OFFSETOF(array,size), P_REG_RCX ); cmp_reg32_imm( P_REG_RCX, b );
6cb8062013-06-19Henrik Grubbström (Grubba)  jle( &label_A ); /* RCX <= b, index outside array */
a137542013-10-08Per Hedbor  mov_imm_reg( b * sizeof(struct svalue), P_REG_RBX); add_reg_mem( P_REG_RBX, P_REG_RDX, OFFSETOF(array,item) );
8886032012-06-25Per Hedbor  /* This overwrites the array. */
a137542013-10-08Per Hedbor  amd64_push_svaluep_to( P_REG_RBX, -1 );
8886032012-06-25Per Hedbor  /* We know it's an array. */
a137542013-10-08Per Hedbor  add_mem32_imm( P_REG_RDX, OFFSETOF(array,refs), -1);
8886032012-06-25Per Hedbor  jnz( &label_C );
a137542013-10-08Per Hedbor  mov_reg_reg( P_REG_RDX, ARG1_REG );
8886032012-06-25Per Hedbor  amd64_call_c_function(really_free_array); jmp( &label_C ); LABEL_A; update_arg1( b ); amd64_call_c_opcode(instrs[a-F_OFFSET].address, instrs[a-F_OFFSET].flags); LABEL_C; } return; case F_LOCAL_INDEX: { LABELS(); /* pike_sp[-2][pike_sp[-1]] */
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
8886032012-06-25Per Hedbor  amd64_load_sp_reg(); amd64_load_fp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame,locals), P_REG_RDX); add_reg_imm( P_REG_RDX, b*sizeof(struct svalue)); mov_mem8_reg( P_REG_RDX, 0, P_REG_RAX ); mov_mem8_reg( sp_reg, -1*sizeof(struct svalue), P_REG_RBX ); shl_reg_imm( P_REG_RAX, 8 ); add_reg_reg( P_REG_RAX, P_REG_RBX ); mov_mem_reg( sp_reg, -1*sizeof(struct svalue)+8, P_REG_RBX ); /* int */ mov_mem_reg( P_REG_RDX, 8, P_REG_RCX ); /* value */ cmp_reg32_imm( P_REG_RAX, (PIKE_T_ARRAY<<8)|PIKE_T_INT );
8886032012-06-25Per Hedbor  jne( &label_A ); /* Array and int index. */
a137542013-10-08Per Hedbor  mov_mem32_reg( P_REG_RCX, OFFSETOF(array,size), P_REG_RDX ); cmp_reg32_imm( P_REG_RBX, 0 ); jge( &label_D );
8886032012-06-25Per Hedbor  /* less than 0, add size */
a137542013-10-08Per Hedbor  add_reg_reg( P_REG_RBX, P_REG_RDX );
8886032012-06-25Per Hedbor  LABEL_D;
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RBX, 0 ); jl( &label_B ); // <0 cmp_reg_reg( P_REG_RBX, P_REG_RCX); jge( &label_B ); // >=size
8886032012-06-25Per Hedbor  /* array, index inside array. push item, swap, pop, done */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RCX, OFFSETOF(array,item), P_REG_RCX ); shl_reg_imm( P_REG_RBX, 4 ); add_reg_reg( P_REG_RBX, P_REG_RCX ); amd64_push_svaluep_to( P_REG_RBX, -1 );
8886032012-06-25Per Hedbor  jmp( &label_C ); LABEL_A; #if 0
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, (PIKE_T_STRING<<8)|PIKE_T_INT );
8886032012-06-25Per Hedbor  jne( &label_B ); #endif LABEL_B; /* something else. */ update_arg1(b); amd64_call_c_opcode(instrs[a-F_OFFSET].address, instrs[a-F_OFFSET].flags); LABEL_C; /* done */ } return;
6785ae2012-06-15Per Hedbor  case F_ADD_NEG_INT: b = -b; case F_ADD_INT: { LABELS();
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
6785ae2012-06-15Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem16_reg( sp_reg, -sizeof(struct svalue), P_REG_RAX ); cmp_reg32_imm( P_REG_RAX,PIKE_T_INT );
6785ae2012-06-15Per Hedbor  jne( &label_A ); mov_mem_reg(sp_reg, -sizeof(struct svalue)+OFFSETOF(svalue,u.integer),
a137542013-10-08Per Hedbor  P_REG_RAX ); test_reg( P_REG_RAX );
1af0932012-06-22Per Hedbor  jz( &label_C );
a137542013-10-08Per Hedbor  add_reg_imm( P_REG_RAX, b );
6785ae2012-06-15Per Hedbor  jo( &label_A ); /* if overflow, use f_add */
a137542013-10-08Per Hedbor  mov_reg_mem( P_REG_RAX,sp_reg,
6785ae2012-06-15Per Hedbor  -sizeof(struct svalue)+OFFSETOF(svalue,u.integer)); jmp(&label_B); /* all done. */ LABEL_A; amd64_push_int(b,0); update_arg1(2); amd64_call_c_opcode( f_add, I_UPDATE_SP ); amd64_load_sp_reg();
1af0932012-06-22Per Hedbor  jmp( &label_B ); LABEL_C; // int, and 0, we need to set it to b and clear subtype mov_imm_mem( PIKE_T_INT, sp_reg, -sizeof(struct svalue ) ); mov_imm_mem( b, sp_reg, -sizeof(struct svalue )+OFFSETOF(svalue,u.integer) );
6785ae2012-06-15Per Hedbor  LABEL_B; } return;
eb42a12011-05-11Henrik Grubbström (Grubba)  case F_NUMBER:
ab7cf52011-05-24Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
eb42a12011-05-11Henrik Grubbström (Grubba)  amd64_push_int(b, 0); return; case F_NEG_NUMBER:
ab7cf52011-05-24Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
eb42a12011-05-11Henrik Grubbström (Grubba)  amd64_push_int(-(INT64)b, 0); return;
ab7cf52011-05-24Henrik Grubbström (Grubba)  case F_STRING: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_push_string(b, 0); return; case F_ARROW_STRING: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_push_string(b, 1); return; case F_MARK_AND_CONST0: ins_f_byte(F_MARK); ins_f_byte(F_CONST0); return; case F_MARK_AND_CONST1: ins_f_byte(F_MARK); ins_f_byte(F_CONST0); return; case F_MARK_AND_STRING: ins_f_byte(F_MARK); ins_f_byte_with_arg(F_STRING, b); return; case F_MARK_AND_GLOBAL: ins_f_byte(F_MARK); ins_f_byte_with_arg(F_GLOBAL, b); return; case F_MARK_AND_LOCAL: ins_f_byte(F_MARK); ins_f_byte_with_arg(F_LOCAL, b); return; case F_MARK_X: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_mark(b); return;
9030f62011-05-26Henrik Grubbström (Grubba)  case F_LFUN: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_push_local_function(b); return;
719c3a2012-06-12Per Hedbor  case F_ASSIGN_LOCAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0);
fd1b5a2012-06-19Henrik Grubbström (Grubba)  amd64_load_sp_reg();
719c3a2012-06-12Per Hedbor  amd64_assign_local(b); add_reg_imm_reg(sp_reg, -sizeof(struct svalue), ARG1_REG);
b505a72012-06-13Per Hedbor  amd64_ref_svalue(ARG1_REG, 0);
719c3a2012-06-12Per Hedbor  return; case F_ASSIGN_LOCAL_AND_POP: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_assign_local(b); amd64_add_sp(-1); return;
b63e282014-08-07Per Hedbor  case F_ASSIGN_PRIVATE_GLOBAL_AND_POP: case F_ASSIGN_PRIVATE_GLOBAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_get_storage( P_REG_RBX, b ); amd64_free_svalue( P_REG_RBX, 0 ); amd64_load_sp_reg(); if( a == F_ASSIGN_PRIVATE_GLOBAL_AND_POP ) { amd64_add_sp(-1); amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, 0); } else { amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, -sizeof(struct svalue)); amd64_ref_svalue(P_REG_RBX,1); /* already have type in RAX (from assign_svalue) */ } return;
719c3a2012-06-12Per Hedbor  case F_ASSIGN_GLOBAL: case F_ASSIGN_GLOBAL_AND_POP: /* arg1: pike_fp->current obj arg2: arg1+idenfier level arg3: Pike_sp-1 */ /* NOTE: We cannot simply do the same optimization as for ASSIGN_LOCAL_AND_POP with assign global, since assigning at times does not add references. We do know, however, that refs (should) never reach 0 when poping the stack. We can thus skip that part of pop_value */ ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg(); amd64_load_sp_reg(); mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object), ARG1_REG); mov_mem_reg(fp_reg, OFFSETOF(pike_frame,context), ARG2_REG); mov_mem16_reg(ARG2_REG, OFFSETOF(inherit, identifier_level), ARG2_REG); add_reg_imm( ARG2_REG, b ); add_reg_imm_reg( sp_reg, -sizeof(struct svalue), ARG3_REG ); amd64_call_c_function( object_low_set_index ); if( a == F_ASSIGN_GLOBAL_AND_POP ) { /* assign done, pop. */
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
719c3a2012-06-12Per Hedbor  amd64_add_sp( -1 ); amd64_free_svalue( sp_reg, 1 ); } return; case F_SIZEOF_LOCAL: { LABELS();
5c82f42012-06-13Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, 0);
719c3a2012-06-12Per Hedbor  amd64_load_fp_reg(); amd64_load_sp_reg(); mov_mem_reg( fp_reg, OFFSETOF(pike_frame,locals), ARG1_REG); add_reg_imm( ARG1_REG, b*sizeof(struct svalue));
a137542013-10-08Per Hedbor  mov_sval_type( ARG1_REG, P_REG_RAX );
719c3a2012-06-12Per Hedbor  /* type in RAX, svalue in ARG1 */
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, PIKE_T_ARRAY );
719c3a2012-06-12Per Hedbor  jne( &label_A ); /* It's an array */ /* move arg to point to the array */ mov_mem_reg( ARG1_REG, OFFSETOF(svalue, u.array ), ARG1_REG); /* load size -> RAX*/
a137542013-10-08Per Hedbor  mov_mem32_reg( ARG1_REG,OFFSETOF(array, size), P_REG_RAX );
719c3a2012-06-12Per Hedbor  jmp( &label_C ); LABEL_A;
a137542013-10-08Per Hedbor  cmp_reg32_imm( P_REG_RAX, PIKE_T_STRING );
719c3a2012-06-12Per Hedbor  jne( &label_B ); /* It's a string */ /* move arg to point to the string */ mov_mem_reg( ARG1_REG, OFFSETOF(svalue, u.string ), ARG1_REG); /* load size ->RAX*/
a137542013-10-08Per Hedbor  mov_mem32_reg( ARG1_REG,OFFSETOF(pike_string, len ), P_REG_RAX );
719c3a2012-06-12Per Hedbor  jmp( &label_C ); LABEL_B; /* It's something else, svalue already in ARG1. */ amd64_call_c_function( pike_sizeof ); LABEL_C;/* all done, res in RAX */ /* Store result on stack */
a137542013-10-08Per Hedbor  amd64_push_int_reg( P_REG_RAX );
719c3a2012-06-12Per Hedbor  } return;
b63e282014-08-07Per Hedbor  case F_PRIVATE_GLOBAL: /* This is a private (or final) global which is not a 'short' one. It is guaranteed to be available as an svalue at pike_fp->current_object->storage + pike_fp->context->storage_offset + offset (I really wish pike_fp->current_storage was kept up to date for pike function..) */ ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_sp_reg(); amd64_get_storage( P_REG_RBX, b ); amd64_push_svaluep( P_REG_RBX ); return;
9030f62011-05-26Henrik Grubbström (Grubba)  case F_GLOBAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg(); amd64_load_sp_reg();
719c3a2012-06-12Per Hedbor  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), ARG3_REG); mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object),
9030f62011-05-26Henrik Grubbström (Grubba)  ARG2_REG);
719c3a2012-06-12Per Hedbor  mov_reg_reg(sp_reg, ARG1_REG);
4ceb792012-06-22Per Hedbor  mov_mem16_reg(ARG3_REG, OFFSETOF(inherit, identifier_level),
719c3a2012-06-12Per Hedbor  ARG3_REG); add_reg_imm(ARG3_REG, b);
9030f62011-05-26Henrik Grubbström (Grubba)  flush_dirty_regs(); /* In case an error is thrown. */
719c3a2012-06-12Per Hedbor  call_imm(low_object_index_no_free);
9030f62011-05-26Henrik Grubbström (Grubba)  /* NB: We know that low_object_index_no_free() doesn't * mess with the stack pointer. */
719c3a2012-06-12Per Hedbor  amd64_add_sp(1);
9030f62011-05-26Henrik Grubbström (Grubba)  return;
719c3a2012-06-12Per Hedbor 
9030f62011-05-26Henrik Grubbström (Grubba)  case F_LOCAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg(); amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), P_REG_RCX); add_reg_imm(P_REG_RCX, b*sizeof(struct svalue)); amd64_push_svaluep(P_REG_RCX);
9030f62011-05-26Henrik Grubbström (Grubba)  return;
719c3a2012-06-12Per Hedbor 
ea82682012-06-25Per Hedbor  case F_CLEAR_2_LOCAL:
91080b2012-06-15Henrik Grubbström (Grubba)  case F_CLEAR_LOCAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), P_REG_RBX); add_reg_imm(P_REG_RBX, b*sizeof(struct svalue)); amd64_free_svalue(P_REG_RBX, 0);
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem(PIKE_T_INT, P_REG_RBX, OFFSETOF(svalue, tu.t.type));
a137542013-10-08Per Hedbor  mov_imm_mem(0, P_REG_RBX, OFFSETOF(svalue, u.integer));
ea82682012-06-25Per Hedbor  if( a == F_CLEAR_2_LOCAL ) {
a137542013-10-08Per Hedbor  add_reg_imm( P_REG_RBX, sizeof(struct svalue ) ); amd64_free_svalue(P_REG_RBX, 0);
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem(PIKE_T_INT, P_REG_RBX, OFFSETOF(svalue, tu.t.type));
a137542013-10-08Per Hedbor  mov_imm_mem(0, P_REG_RBX, OFFSETOF(svalue, u.integer));
ea82682012-06-25Per Hedbor  }
91080b2012-06-15Henrik Grubbström (Grubba)  return; case F_INC_LOCAL_AND_POP: { LABELS(); ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), P_REG_RCX); add_reg_imm(P_REG_RCX, b*sizeof(struct svalue)); mov_sval_type(P_REG_RCX, P_REG_RAX); cmp_reg32_imm(P_REG_RAX, PIKE_T_INT);
91080b2012-06-15Henrik Grubbström (Grubba)  jne(&label_A); /* Integer - Zap subtype and try just incrementing it. */
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_reg_mem32(P_REG_RAX, P_REG_RCX, OFFSETOF(svalue, tu.t.type));
a137542013-10-08Per Hedbor  add_imm_mem(1, P_REG_RCX, OFFSETOF(svalue, u.integer));
ec96912012-06-17Henrik Grubbström (Grubba)  jno(&label_B);
a137542013-10-08Per Hedbor  add_imm_mem(-1, P_REG_RCX, OFFSETOF(svalue, u.integer));
91080b2012-06-15Henrik Grubbström (Grubba)  LABEL_A; /* Fallback to the C-implementation. */ update_arg1(b); amd64_call_c_opcode(instrs[a-F_OFFSET].address, instrs[a-F_OFFSET].flags); LABEL_B; } return;
b669592012-06-19Henrik Grubbström (Grubba)  case F_INC_LOCAL: ins_f_byte_with_arg(F_INC_LOCAL_AND_POP, b); ins_f_byte_with_arg(F_LOCAL, b); return; case F_POST_INC_LOCAL: ins_f_byte_with_arg(F_LOCAL, b); ins_f_byte_with_arg(F_INC_LOCAL_AND_POP, b); return;
91080b2012-06-15Henrik Grubbström (Grubba)  case F_DEC_LOCAL_AND_POP: { LABELS(); ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg();
      /* NOTE(review): fragment — the head of ins_f_byte_with_arg() and the
       * opening of this switch (dispatching on opcode `a` with argument `b`)
       * are above this chunk.  The open case here decrements a local in
       * place (presumably F_DEC_LOCAL_AND_POP — the F_DEC_LOCAL case below
       * is built from it; confirm against the full file). */

      /* RCX = &Pike_fp->locals[b]; RAX = its type tag. */
      mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), P_REG_RCX);
      add_reg_imm(P_REG_RCX, b*sizeof(struct svalue));
      mov_sval_type(P_REG_RCX, P_REG_RAX);
      cmp_reg32_imm(P_REG_RAX, PIKE_T_INT);
      jne(&label_A);
      /* Integer - Zap subtype and try just decrementing it.
       * (RAX == PIKE_T_INT on this path; the 32-bit store rewrites the
       * combined type+subtype word, clearing the subtype.) */
      mov_reg_mem32(P_REG_RAX, P_REG_RCX, OFFSETOF(svalue, tu.t.type));
      add_imm_mem(-1, P_REG_RCX, OFFSETOF(svalue, u.integer));
      jno(&label_B);
      /* Signed overflow (INT_MIN - 1): undo the decrement and fall through
       * to the C opcode, which handles bignum promotion. */
      add_imm_mem(1, P_REG_RCX, OFFSETOF(svalue, u.integer));
     LABEL_A;
      /* Fallback to the C-implementation. */
      update_arg1(b);
      amd64_call_c_opcode(instrs[a-F_OFFSET].address,
                          instrs[a-F_OFFSET].flags);
     LABEL_B;
    }
    return;

    /* Composite opcodes expressed in terms of simpler ones. */
  case F_DEC_LOCAL:
    ins_f_byte_with_arg(F_DEC_LOCAL_AND_POP, b);
    ins_f_byte_with_arg(F_LOCAL, b);          /* then push the new value */
    return;

  case F_POST_DEC_LOCAL:
    ins_f_byte_with_arg(F_LOCAL, b);          /* push the old value first */
    ins_f_byte_with_arg(F_DEC_LOCAL_AND_POP, b);
    return;

#if 0
  /* These really have to be done inline:
     reg = (sp- *--mark_sp)>>16
     ltosval_and_free(-(args+2)) -> pike_sp-args
     call_builtin(reg)
     -- stack now lvalue, result, so now..
     ins_f_byte( F_ASSIGN or F_ASSIGN_AND_POP )
  */
  case F_LTOSVAL_CALL_BUILTIN_AND_ASSIGN_POP:
  case F_LTOSVAL_CALL_BUILTIN_AND_ASSIGN:
    return;
#endif

    /* More composite opcodes: builtin call followed by pop/return. */
  case F_CALL_BUILTIN_AND_POP:
    ins_f_byte_with_arg( F_CALL_BUILTIN, b );
    ins_f_byte( F_POP_VALUE );
    return;

  case F_MARK_CALL_BUILTIN_AND_RETURN:
    ins_f_byte_with_arg( F_MARK_CALL_BUILTIN, b );
    ins_f_byte( F_DUMB_RETURN );
    return;

  case F_MARK_CALL_BUILTIN_AND_POP:
    ins_f_byte_with_arg( F_MARK_CALL_BUILTIN, b );
    ins_f_byte( F_POP_VALUE );
    return;

  case F_CALL_BUILTIN1_AND_POP:
    ins_f_byte_with_arg( F_CALL_BUILTIN1, b );
    ins_f_byte( F_POP_VALUE );
    return;

  case F_CALL_BUILTIN_AND_RETURN:
    ins_f_byte_with_arg( F_CALL_BUILTIN, b );
    ins_f_byte( F_DUMB_RETURN );
    return;

    /* The three F_*CALL_BUILTIN* cases below share a tail: they only
     * differ in how the argument count ends up in ARG1_REG, then all
     * fall through to the common amd64_call_c_opcode() call. */
  case F_CALL_BUILTIN:
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_mark_sp_reg();
    amd64_load_sp_reg();
    /* RAX = *--mark_sp; ARG1 = (sp - RAX) / sizeof(struct svalue). */
    mov_mem_reg( mark_sp_reg, -sizeof(struct svalue*), P_REG_RAX );
    amd64_add_mark_sp( -1 );
    mov_reg_reg( sp_reg, ARG1_REG );
    sub_reg_reg( ARG1_REG, P_REG_RAX );
    shr_reg_imm( ARG1_REG, 4 );
    /* arg1 = (sp_reg - *--mark_sp)/16 (sizeof(svalue)) */
    /* FALLTHROUGH (guarded by the if() below) */

  case F_MARK_CALL_BUILTIN:
    if(a == F_MARK_CALL_BUILTIN )
    {
      /* Note: It is not actually possible to do ins_debug_instr_prologue
         here.
         ins_debug_instr_prologue(a-F_OFFSET, b, 0);
      */
      mov_imm_reg( 0, ARG1_REG );   /* no arguments */
    }
    /* FALLTHROUGH */

  case F_CALL_BUILTIN1:
    if(a == F_CALL_BUILTIN1 )
    {
      /* Note: It is not actually possible to do ins_debug_instr_prologue
         here.
         ins_debug_instr_prologue(a-F_OFFSET, b, 0);
      */
      mov_imm_reg( 1, ARG1_REG );   /* exactly one argument */
    }

    /* Get function pointer */
    amd64_call_c_opcode(Pike_compiler->new_program->constants[b].sval.u.efun->function,
                        I_UPDATE_SP);
    return;

  case F_CONSTANT:
    /* Push program constant #b: walk frame->context->prog->constants[b].sval. */
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_fp_reg();
    amd64_load_sp_reg();
    mov_mem_reg( fp_reg, OFFSETOF(pike_frame,context), P_REG_RCX );
    mov_mem_reg( P_REG_RCX, OFFSETOF(inherit,prog), P_REG_RCX );
    mov_mem_reg( P_REG_RCX, OFFSETOF(program,constants), P_REG_RCX );
    add_reg_imm( P_REG_RCX, b*sizeof(struct program_constant) +
                 OFFSETOF(program_constant,sval) );
    amd64_push_svaluep( P_REG_RCX );
    return;

  case F_GLOBAL_LVALUE:
    /* Push a T_OBJ_INDEX lvalue: this object + (identifier_level + b). */
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_fp_reg();
    amd64_load_sp_reg();
    amd64_push_this_object( );
    mov_imm_mem( T_OBJ_INDEX,  sp_reg, OFFSETOF(svalue, tu.t.type));
    mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), P_REG_RAX);
    mov_mem16_reg( P_REG_RAX, OFFSETOF(inherit, identifier_level), P_REG_RAX);
    add_reg_imm( P_REG_RAX, b );
    mov_reg_mem( P_REG_RAX, sp_reg, OFFSETOF(svalue,u.identifier) );
    amd64_add_sp( 1 );
    return;

  case F_LOCAL_LVALUE:
    /* Push a T_SVALUE_PTR lvalue pointing at local #b, followed by a
     * T_VOID filler svalue (lvalues occupy two stack slots). */
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_fp_reg();
    amd64_load_sp_reg();
    /* &frame->locals[b] */
    mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), P_REG_RAX);
    add_reg_imm( P_REG_RAX, b*sizeof(struct svalue));
    mov_imm_mem( T_SVALUE_PTR,  sp_reg, OFFSETOF(svalue, tu.t.type));
    mov_reg_mem( P_REG_RAX, sp_reg, OFFSETOF(svalue,u.lval) );
    mov_imm_mem( T_VOID,  sp_reg,
                 OFFSETOF(svalue, tu.t.type)+sizeof(struct svalue));
    amd64_add_sp( 2 );
    return;

  case F_PROTECT_STACK:
    /* frame->expendible = &frame->locals[b]. */
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_fp_reg();
    mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG);
    if (b) {
      add_reg_imm_reg(ARG1_REG, sizeof(struct svalue) * b, ARG1_REG);
    }
    mov_reg_mem(ARG1_REG, fp_reg, OFFSETOF(pike_frame, expendible));
    return;

  case F_MARK_AT:
    /* *mark_sp++ = &frame->locals[b]. */
    ins_debug_instr_prologue(a-F_OFFSET, b, 0);
    amd64_load_fp_reg();
    amd64_load_mark_sp_reg();
    mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG);
    if (b) {
      add_reg_imm_reg(ARG1_REG, sizeof(struct svalue) * b, ARG1_REG);
    }
    mov_reg_mem(ARG1_REG, mark_sp_reg, 0x00);
    amd64_add_mark_sp( 1 );
    return;
  }
  /* Opcode not special-cased above: store the argument and emit the
   * generic (C-implemented) form of the instruction. */
  update_arg1(b);
  ins_f_byte(a);
}
/* Emit a conditional branch opcode `op` that takes one argument `a`.
 *
 * Returns the code-buffer offset of the 32-bit jump displacement (to be
 * patched later via amd64_update_f_jump()), or -1 if `op` is not a branch
 * instruction (caller must then emit it some other way).
 *
 * F_BRANCH_IF_[NOT_]LOCAL gets an inlined fast path; everything else is
 * delegated to the generic amd64_ins_f_jump(). */
int amd64_ins_f_jump_with_arg(unsigned int op, INT32 a, int backward_jump)
{
  LABELS();
  if (!(instrs[op - F_OFFSET].flags & I_BRANCH))
    return -1;
  switch( op )
  {
    case F_BRANCH_IF_NOT_LOCAL:
    case F_BRANCH_IF_LOCAL:
      ins_debug_instr_prologue(op-F_OFFSET, a, 0);
      amd64_load_fp_reg();
      /* ARG1 = &Pike_fp->locals[a] */
      mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG);
      add_reg_imm( ARG1_REG, a*sizeof(struct svalue));
      /* Truthiness of the local:
           if( type == PIKE_T_INT )
             u.integer -> RAX
           else if( type == PIKE_T_OBJECT || type == PIKE_T_FUNCTION )
             call svalue_is_true(&local)
           else
             1 -> RAX

         The tests are ordered assuming integers are most commonly
         checked. That is not necessarily true.
      */
      mov_sval_type( ARG1_REG, P_REG_RCX );
      cmp_reg32_imm( P_REG_RCX, PIKE_T_INT );
      je( &label_C );
      /* (1 << type) & (BIT_FUNCTION|BIT_OBJECT): object/function need the
       * full C check (e.g. for overloaded truth value). */
      mov_imm_reg( 1, P_REG_RAX );
      shl_reg32_reg( P_REG_RAX, P_REG_RCX );
      and_reg32_imm( P_REG_RAX, BIT_FUNCTION|BIT_OBJECT );
      jnz( &label_A );
      /* Not object, int or function. Always true. */
      mov_imm_reg( 1, P_REG_RAX );
      jmp( &label_B );
    LABEL_A;
      /* ARG1_REG still holds &local; result comes back in RAX. */
      amd64_call_c_function(svalue_is_true);
      jmp( &label_B );
    LABEL_C;
      mov_mem_reg( ARG1_REG, OFFSETOF(svalue, u.integer ), P_REG_RAX );
      /* integer. */
    LABEL_B;
      /* Branch on RAX (truth value) and return the displacement offset
       * for later patching. */
      test_reg( P_REG_RAX );
      if( op == F_BRANCH_IF_LOCAL )
        return jnz_imm_rel32(0);
      return jz_imm_rel32(0);
  }
  /* Generic branch: store the argument and defer to amd64_ins_f_jump(). */
  maybe_update_pc();
  update_arg1(a);
  return amd64_ins_f_jump(op, backward_jump);
}
d1fa802011-05-09Henrik Grubbström (Grubba) void ins_f_byte_with_2_args(unsigned int a, INT32 b, INT32 c) { maybe_update_pc();
eb42a12011-05-11Henrik Grubbström (Grubba)  switch(a) { case F_NUMBER64:
ab7cf52011-05-24Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, c);
eb42a12011-05-11Henrik Grubbström (Grubba)  amd64_push_int((((unsigned INT64)b)<<32)|(unsigned INT32)c, 0); return;
ab7cf52011-05-24Henrik Grubbström (Grubba)  case F_MARK_AND_EXTERNAL: ins_f_byte(F_MARK); ins_f_byte_with_2_args(F_EXTERNAL, b, c); return;
f4107d2012-06-18Per Hedbor 
1af0932012-06-22Per Hedbor  case F_ADD_LOCAL_INT:
f4107d2012-06-18Per Hedbor  case F_ADD_LOCAL_INT_AND_POP: { LABELS(); ins_debug_instr_prologue(a-F_OFFSET, b, 0);
fd1b5a2012-06-19Henrik Grubbström (Grubba)  amd64_load_fp_reg();
f4107d2012-06-18Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG); add_reg_imm( ARG1_REG, b*sizeof(struct svalue) ); /* arg1 = dst arg2 = int */
a137542013-10-08Per Hedbor  mov_sval_type( ARG1_REG, P_REG_RAX ); cmp_reg32_imm( P_REG_RAX, PIKE_T_INT );
f4107d2012-06-18Per Hedbor  jne(&label_A); /* Fallback */
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem( PIKE_T_INT, ARG1_REG, OFFSETOF(svalue, tu.t.type));
f4107d2012-06-18Per Hedbor  add_imm_mem( c, ARG1_REG,OFFSETOF(svalue,u.integer)); jno( &label_B); add_imm_mem( -c, ARG1_REG,OFFSETOF(svalue,u.integer));
1af0932012-06-22Per Hedbor  /* Overflow. Use C version */
f4107d2012-06-18Per Hedbor  LABEL_A; update_arg2(c); update_arg1(b);
1af0932012-06-22Per Hedbor  ins_f_byte(a); /* Push already done by C version. */ if( a == F_ADD_LOCAL_INT ) jmp( &label_C );
f4107d2012-06-18Per Hedbor  LABEL_B;
1af0932012-06-22Per Hedbor  if( a == F_ADD_LOCAL_INT ) { /* push the local. */ /* We know it's an integer (since we did not use the fallback) */
c33ce92012-06-28Per Hedbor  amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg( ARG1_REG, OFFSETOF(svalue,u.integer), P_REG_RAX ); amd64_push_int_reg( P_REG_RAX );
1af0932012-06-22Per Hedbor  } LABEL_C;
f4107d2012-06-18Per Hedbor  return; }
cf3be62014-07-15Per Hedbor  #if 0 /* this is a: nonworking, and b: not really all that more efficient anyway.. */ case F_APPLY_N: mov_imm_reg( APPLY_SVALUE_STRICT, ARG1_REG ); mov_imm_reg( c, ARG2_REG ); mov_ptr_reg( &((Pike_fp->context->prog->constants + b)->sval), ARG3_REG ); clear_reg( ARG4_REG ); amd64_call_c_opcode(mega_apply, I_UPDATE_SP); return; #endif case F_CALL_BUILTIN_N: mov_imm_reg( c, ARG1_REG ); amd64_call_c_opcode(Pike_compiler->new_program->constants[b].sval.u.efun->function, I_UPDATE_SP); return;
f4107d2012-06-18Per Hedbor  case F_ADD_LOCALS_AND_POP: { LABELS(); ins_debug_instr_prologue(a-F_OFFSET, b, 0);
fd1b5a2012-06-19Henrik Grubbström (Grubba)  amd64_load_fp_reg();
f4107d2012-06-18Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG); add_reg_imm( ARG1_REG, b*sizeof(struct svalue) ); add_reg_imm_reg( ARG1_REG,(c-b)*sizeof(struct svalue), ARG2_REG ); /* arg1 = dst arg2 = src */
a137542013-10-08Per Hedbor  mov_sval_type( ARG1_REG, P_REG_RAX ); mov_sval_type( ARG2_REG, P_REG_RBX ); shl_reg_imm( P_REG_RAX, 8 ); add_reg_reg( P_REG_RAX, P_REG_RBX ); cmp_reg32_imm( P_REG_RAX, (PIKE_T_INT<<8) | PIKE_T_INT );
f4107d2012-06-18Per Hedbor  jne(&label_A); /* Fallback */
a137542013-10-08Per Hedbor  mov_mem_reg( ARG2_REG, OFFSETOF(svalue,u.integer), P_REG_RAX ); add_reg_mem( P_REG_RAX, ARG1_REG, OFFSETOF(svalue,u.integer));
f4107d2012-06-18Per Hedbor  jo( &label_A); /* Clear subtype */
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem( PIKE_T_INT, ARG1_REG,OFFSETOF(svalue, tu.t.type));
a137542013-10-08Per Hedbor  mov_reg_mem( P_REG_RAX, ARG1_REG, OFFSETOF(svalue,u.integer));
f4107d2012-06-18Per Hedbor  jmp( &label_B ); LABEL_A; update_arg2(c); update_arg1(b); ins_f_byte(a); /* Will call C version */ LABEL_B; return; } case F_ASSIGN_LOCAL_NUMBER_AND_POP:
c33ce92012-06-28Per Hedbor  ins_debug_instr_prologue(a-F_OFFSET, b, c);
fd1b5a2012-06-19Henrik Grubbström (Grubba)  amd64_load_fp_reg();
f4107d2012-06-18Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG); add_reg_imm( ARG1_REG,b*sizeof(struct svalue) );
a137542013-10-08Per Hedbor  mov_reg_reg( ARG1_REG, P_REG_RBX );
f4107d2012-06-18Per Hedbor  amd64_free_svalue(ARG1_REG, 0);
a137542013-10-08Per Hedbor  mov_imm_mem(c, P_REG_RBX, OFFSETOF(svalue, u.integer));
73d2d42014-06-24Henrik Grubbström (Grubba)  mov_imm_mem32(PIKE_T_INT, P_REG_RBX, OFFSETOF(svalue, tu.t.type));
f4107d2012-06-18Per Hedbor  return;
50ced12013-06-12Per Hedbor  case F_LOCAL_2_GLOBAL: ins_debug_instr_prologue(a-F_OFFSET, b, 0); amd64_load_fp_reg(); amd64_load_sp_reg(); mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG3_REG ); add_reg_imm( ARG3_REG, c*sizeof(struct svalue) ); mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object), ARG1_REG); mov_mem_reg(fp_reg, OFFSETOF(pike_frame,context), ARG2_REG); mov_mem16_reg(ARG2_REG, OFFSETOF(inherit, identifier_level), ARG2_REG); add_reg_imm( ARG2_REG, b ); amd64_call_c_function( object_low_set_index ); return;
b505a72012-06-13Per Hedbor  case F_LOCAL_2_LOCAL:
5c82f42012-06-13Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, c);
b505a72012-06-13Per Hedbor  if( b != c ) { amd64_load_fp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), P_REG_RBX ); add_reg_imm( P_REG_RBX, b*sizeof(struct svalue) );
b505a72012-06-13Per Hedbor  /* RBX points to dst. */
a137542013-10-08Per Hedbor  amd64_free_svalue( P_REG_RBX, 0 );
373ff52012-06-13Henrik Grubbström (Grubba)  /* assign rbx[0] = rbx[c-b] */
a137542013-10-08Per Hedbor  mov_mem_reg( P_REG_RBX, (c-b)*sizeof(struct svalue), P_REG_RAX ); mov_mem_reg( P_REG_RBX, (c-b)*sizeof(struct svalue)+8, P_REG_RCX ); mov_reg_mem( P_REG_RAX, P_REG_RBX, 0 ); mov_reg_mem( P_REG_RCX, P_REG_RBX, 8 ); amd64_ref_svalue( P_REG_RBX, 1 );
b505a72012-06-13Per Hedbor  } return;
9030f62011-05-26Henrik Grubbström (Grubba)  case F_2_LOCALS:
719c3a2012-06-12Per Hedbor #if 1
5c82f42012-06-13Henrik Grubbström (Grubba)  ins_debug_instr_prologue(a-F_OFFSET, b, c);
719c3a2012-06-12Per Hedbor  amd64_load_fp_reg(); amd64_load_sp_reg();
a137542013-10-08Per Hedbor  mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), P_REG_R8); add_reg_imm( P_REG_R8, b*sizeof(struct svalue) ); amd64_push_svaluep(P_REG_R8); add_reg_imm( P_REG_R8, (c-b)*sizeof(struct svalue) ); amd64_push_svaluep(P_REG_R8);
719c3a2012-06-12Per Hedbor #else ins_f_byte_with_arg( F_LOCAL, b ); ins_f_byte_with_arg( F_LOCAL, c ); #endif
9030f62011-05-26Henrik Grubbström (Grubba)  return;
719c3a2012-06-12Per Hedbor 
9853bd2012-06-13Henrik Grubbström (Grubba)  case F_FILL_STACK: { LABELS(); if (!b) return; ins_debug_instr_prologue(a-F_OFFSET, b, c); amd64_load_fp_reg(); amd64_load_sp_reg(); mov_mem_reg(fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG); add_reg_imm(ARG1_REG, b*sizeof(struct svalue)); jmp(&label_A); LABEL_B; amd64_push_int(0, c); LABEL_A; cmp_reg_reg(sp_reg, ARG1_REG);
8422c42013-06-19Henrik Grubbström (Grubba)  jl(&label_B);
9853bd2012-06-13Henrik Grubbström (Grubba)  } return;
5151e52012-06-10Henrik Grubbström (Grubba)  case F_INIT_FRAME: ins_debug_instr_prologue(a-F_OFFSET, b, c); amd64_load_fp_reg();
719c3a2012-06-12Per Hedbor  if(OFFSETOF(pike_frame, num_locals) != OFFSETOF(pike_frame, num_args)-2 ) Pike_fatal("This code does not with unless num_args\n" "directly follows num_locals in struct pike_frame\n"); mov_imm_mem32( (b<<16)|c, fp_reg, OFFSETOF(pike_frame, num_locals));
5151e52012-06-10Henrik Grubbström (Grubba)  return;
eb42a12011-05-11Henrik Grubbström (Grubba)  }
d1fa802011-05-09Henrik Grubbström (Grubba)  update_arg2(c); update_arg1(b); ins_f_byte(a);
7990392006-04-27Tor Edvardsson }
/* Emit a conditional branch opcode `op` taking two arguments.
 *
 * Returns the code-buffer offset of the 32-bit displacement to patch
 * later, or -1 if `op` is not a branch instruction.  Unlike the
 * one-argument variant there are no inlined fast paths: both arguments
 * are stored and the generic branch emitter is used. */
int amd64_ins_f_jump_with_2_args(unsigned int op, INT32 a, INT32 b,
                                 int backward_jump)
{
  if (!(instrs[op - F_OFFSET].flags & I_BRANCH)) return -1;
  maybe_update_pc();
  update_arg2(b);
  update_arg1(a);
  return amd64_ins_f_jump(op, backward_jump);
}

/* Patch the 32-bit jump displacement at code-buffer offset `offset` so the
 * jump lands at `to_offset`.  The stored value is relative to the end of
 * the 4-byte displacement field (hence the -4), matching the amd64 rel32
 * jump encoding. */
void amd64_update_f_jump(INT32 offset, INT32 to_offset)
{
  upd_pointer(offset, to_offset - offset - 4);
}

/* Inverse of amd64_update_f_jump(): read back the absolute target offset
 * of the rel32 jump displacement stored at `offset`. */
INT32 amd64_read_f_jump(INT32 offset)
{
  return read_pointer(offset) + offset + 4;
}