pike.git / src / peep.in

Context lines:

pike.git/src/peep.in:293:
   OPER_INT(NUMBER,$1a)
   OPER_INT(NEG_NUMBER [!INT32_NEG_OVERFLOW($1a)], -$1a)
   OPER_INT(CONST0,0)
   OPER_INT(CONST1,1)
   OPER_INT(CONST_1,-1)
   OPER_INT(BIGNUM,0x7fffffff)
+
+ // Some noops related to the above opcodes.
+ // MOD_INT [$1a == 1]: POP_VALUE CONST0 // cf string % int ==> string
+ LSH_INT [!$1a]:
+ RSH_INT [!$1a]:
+ SUBTRACT_INT [!$1a]:
+ // ADD_INT [!$1a]: // cf string + int ==> string
+ AND_INT [!$1a]: POP_VALUE CONST0
+ OR_INT [!$1a]:
+ XOR_INT [!$1a]:
+ //DIVIDE_INT [$1a == 1]: // cf string / int ==> array(string)
+ //MULTIPLY_INT [!$1a]: POP_VALUE CONST0 // cf string * int ==> string
+ MULTIPLY_INT [$1a == 1]:
+ MULTIPLY_INT [$1a == -1]: NEGATE
+
   // this should be automatically const optimized, right?
   // ADD_INTS ADD_INTS: ADD_INTS ($1a+$2a)

   // This set of optimizations is broken. Consider the case:
   // STRING ADD_INT ADD_INT
   // ADD_INT ADD_INT: ADD_INTS ($1a+$2a)
   // ADD_NEG_INT ADD_NEG_INT: ADD_NEG_INT ($1a+$2a)
   // ADD_NEG_INT ADD_INT [$1a <= $2a]: ADD_INT ($2a-$1a)
   // ADD_NEG_INT ADD_INT [$1a > $2a]: ADD_NEG_INT ($1a-$2a)
pike.git/src/peep.in:576:
   GLOBAL LOCAL_INDEX : GLOBAL_LOCAL_INDEX($1a,$2a)

   SIZEOF CONST1 BRANCH_WHEN_LT : SIZEOF BRANCH_WHEN_ZERO ($3a)
   SIZEOF_LOCAL CONST1 BRANCH_WHEN_LT : SIZEOF_LOCAL($1a) BRANCH_WHEN_ZERO ($3a)
   SIZEOF CONST0 BRANCH_WHEN_LE : SIZEOF BRANCH_WHEN_ZERO ($3a)
   SIZEOF_LOCAL CONST0 BRANCH_WHEN_LE : SIZEOF_LOCAL($1a) BRANCH_WHEN_ZERO ($3a)

   CLEAR_LOCAL DEC_LOCAL_AND_POP($1a) : CONST_1 ASSIGN_LOCAL_AND_POP($1a)
   CLEAR_LOCAL INC_LOCAL_AND_POP($1a) : CONST1 ASSIGN_LOCAL_AND_POP($1a)

+ // Popping of side-effect free values.
   NUMBER POP_VALUE :
-
+ NEG_NUMBER POP_VALUE:
   STRING POP_VALUE :
   //FLOAT POP_VALUE :
   CONSTANT POP_VALUE :
   LOCAL POP_VALUE :
-
+ GLOBAL POP_VALUE:
   PRIVATE_GLOBAL POP_VALUE :
   PRIVATE_TYPED_GLOBAL POP_VALUE :
-
+ 2_LOCALS POP_VALUE: LOCAL($1a)
   TRAMPOLINE POP_VALUE :
-
+ CONST0 POP_VALUE:
+ CONST1 POP_VALUE:
+ CONST_1 POP_VALUE:
+ BIGNUM POP_VALUE:
+ MOD_INT POP_VALUE: POP_VALUE
+ LSH_INT POP_VALUE: POP_VALUE
+ RSH_INT POP_VALUE: POP_VALUE
+ SUBTRACT_INT POP_VALUE: POP_VALUE
+ ADD_INT POP_VALUE: POP_VALUE
+ AND_INT POP_VALUE: POP_VALUE
+ OR_INT POP_VALUE: POP_VALUE
+ XOR_INT POP_VALUE: POP_VALUE
+ DIVIDE_INT POP_VALUE: POP_VALUE
+ MULTIPLY_INT POP_VALUE: POP_VALUE

-
+ NUMBER POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ NEG_NUMBER POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ STRING POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ //FLOAT POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ CONSTANT POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ LOCAL POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ GLOBAL POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ PRIVATE_GLOBAL POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ PRIVATE_TYPED_GLOBAL POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ 2_LOCALS POP_N_ELEMS [$2a > 0]: LOCAL($1a) POP_N_ELEMS($2a-1)
+ TRAMPOLINE POP_N_ELEMS [$2a > 0]: LOCAL($1a) POP_N_ELEMS($2a-1)
+ CONST0 POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ CONST1 POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ CONST_1 POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ BIGNUM POP_N_ELEMS [$2a > 0]: POP_N_ELEMS($2a-1)
+ MOD_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ LSH_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ RSH_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ SUBTRACT_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ ADD_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ AND_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ OR_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ XOR_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ DIVIDE_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+ MULTIPLY_INT POP_N_ELEMS: POP_N_ELEMS($2a)
+
   RECUR RETURN [check_tailrecursion()] : TAIL_RECUR ($1a)

   // This doesn't really work
   // MARK TAIL_RECUR : BRANCH ($2a)
   // These optimizations are now handled in optimize_eq
   // CONST0 BRANCH_WHEN_EQ: BRANCH_WHEN_ZERO($2a)
   // CONST0 BRANCH_WHEN_NE: BRANCH_WHEN_NON_ZERO($2a)
   //
   // This one is not safe.
   // CONST0 EQ: NOT