401 |
401 |
402 |
402 |
403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) { |
403 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) { |
404 assert(state != NULL, "state must be defined"); |
404 assert(state != NULL, "state must be defined"); |
405 |
405 |
|
406 #ifndef PRODUCT |
|
407 state->verify(); |
|
408 #endif |
|
409 |
406 ValueStack* s = state; |
410 ValueStack* s = state; |
407 for_each_state(s) { |
411 for_each_state(s) { |
408 if (s->kind() == ValueStack::EmptyExceptionState) { |
412 if (s->kind() == ValueStack::EmptyExceptionState) { |
409 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty"); |
413 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty"); |
410 continue; |
414 continue; |
1790 tty->print_cr(" ###class not loaded at load_%s bci %d", |
1794 tty->print_cr(" ###class not loaded at load_%s bci %d", |
1791 x->is_static() ? "static" : "field", x->printable_bci()); |
1795 x->is_static() ? "static" : "field", x->printable_bci()); |
1792 } |
1796 } |
1793 #endif |
1797 #endif |
1794 |
1798 |
|
1799 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception(); |
1795 if (x->needs_null_check() && |
1800 if (x->needs_null_check() && |
1796 (needs_patching || |
1801 (needs_patching || |
1797 MacroAssembler::needs_explicit_null_check(x->offset()))) { |
1802 MacroAssembler::needs_explicit_null_check(x->offset()) || |
|
1803 stress_deopt)) { |
|
1804 LIR_Opr obj = object.result(); |
|
1805 if (stress_deopt) { |
|
1806 obj = new_register(T_OBJECT); |
|
1807 __ move(LIR_OprFact::oopConst(NULL), obj); |
|
1808 } |
1798 // emit an explicit null check because the offset is too large |
1809 // emit an explicit null check because the offset is too large |
1799 __ null_check(object.result(), new CodeEmitInfo(info)); |
1810 __ null_check(obj, new CodeEmitInfo(info)); |
1800 } |
1811 } |
1801 |
1812 |
1802 LIR_Opr reg = rlock_result(x, field_type); |
1813 LIR_Opr reg = rlock_result(x, field_type); |
1803 LIR_Address* address; |
1814 LIR_Address* address; |
1804 if (needs_patching) { |
1815 if (needs_patching) { |
1871 if (nc == NULL) { |
1884 if (nc == NULL) { |
1872 info = state_for(x); |
1885 info = state_for(x); |
1873 } else { |
1886 } else { |
1874 info = state_for(nc); |
1887 info = state_for(nc); |
1875 } |
1888 } |
|
1889 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) { |
|
1890 LIR_Opr obj = new_register(T_OBJECT); |
|
1891 __ move(LIR_OprFact::oopConst(NULL), obj); |
|
1892 __ null_check(obj, new CodeEmitInfo(info)); |
|
1893 } |
1876 } |
1894 } |
1877 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); |
1895 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); |
1878 } |
1896 } |
1879 |
1897 |
1880 |
1898 |
1881 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { |
1899 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { |
1882 bool use_length = x->length() != NULL; |
1900 bool use_length = x->length() != NULL; |
1883 LIRItem array(x->array(), this); |
1901 LIRItem array(x->array(), this); |
1884 LIRItem index(x->index(), this); |
1902 LIRItem index(x->index(), this); |
1885 LIRItem length(this); |
1903 LIRItem length(this); |
1886 bool needs_range_check = true; |
1904 bool needs_range_check = x->compute_needs_range_check(); |
1887 |
1905 |
1888 if (use_length) { |
1906 if (use_length && needs_range_check) { |
1889 needs_range_check = x->compute_needs_range_check(); |
1907 length.set_instruction(x->length()); |
1890 if (needs_range_check) { |
1908 length.load_item(); |
1891 length.set_instruction(x->length()); |
|
1892 length.load_item(); |
|
1893 } |
|
1894 } |
1909 } |
1895 |
1910 |
1896 array.load_item(); |
1911 array.load_item(); |
1897 if (index.is_constant() && can_inline_as_constant(x->index())) { |
1912 if (index.is_constant() && can_inline_as_constant(x->index())) { |
1898 // let it be a constant |
1913 // let it be a constant |
1908 if (nc != NULL) { |
1923 if (nc != NULL) { |
1909 null_check_info = state_for(nc); |
1924 null_check_info = state_for(nc); |
1910 } else { |
1925 } else { |
1911 null_check_info = range_check_info; |
1926 null_check_info = range_check_info; |
1912 } |
1927 } |
|
1928 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) { |
|
1929 LIR_Opr obj = new_register(T_OBJECT); |
|
1930 __ move(LIR_OprFact::oopConst(NULL), obj); |
|
1931 __ null_check(obj, new CodeEmitInfo(null_check_info)); |
|
1932 } |
1913 } |
1933 } |
1914 |
1934 |
1915 // emit array address setup early so it schedules better |
1935 // emit array address setup early so it schedules better |
1916 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); |
1936 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); |
1917 |
1937 |
1918 if (GenerateRangeChecks && needs_range_check) { |
1938 if (GenerateRangeChecks && needs_range_check) { |
1919 if (use_length) { |
1939 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) { |
|
1940 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result())); |
|
1941 } else if (use_length) { |
1920 // TODO: use a (modified) version of array_range_check that does not require a |
1942 // TODO: use a (modified) version of array_range_check that does not require a |
1921 // constant length to be loaded to a register |
1943 // constant length to be loaded to a register |
1922 __ cmp(lir_cond_belowEqual, length.result(), index.result()); |
1944 __ cmp(lir_cond_belowEqual, length.result(), index.result()); |
1923 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); |
1945 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); |
1924 } else { |
1946 } else { |
2632 |
2654 |
2633 if (method()->is_synchronized() && GenerateSynchronizationCode) { |
2655 if (method()->is_synchronized() && GenerateSynchronizationCode) { |
2634 LIR_Opr lock = new_register(T_INT); |
2656 LIR_Opr lock = new_register(T_INT); |
2635 __ load_stack_address_monitor(0, lock); |
2657 __ load_stack_address_monitor(0, lock); |
2636 |
2658 |
2637 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); |
2659 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); |
2638 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); |
2660 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); |
2639 |
2661 |
2640 // receiver is guaranteed non-NULL so don't need CodeEmitInfo |
2662 // receiver is guaranteed non-NULL so don't need CodeEmitInfo |
2641 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); |
2663 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); |
2642 } |
2664 } |
2643 } |
2665 } |
2644 |
2666 |
2645 // increment invocation counters if needed |
2667 // increment invocation counters if needed |
2646 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. |
2668 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. |
2647 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); |
2669 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); |
2648 increment_invocation_counter(info); |
2670 increment_invocation_counter(info); |
2649 } |
2671 } |
2650 |
2672 |
2651 // all blocks with a successor must end with an unconditional jump |
2673 // all blocks with a successor must end with an unconditional jump |
2652 // to the successor even if they are consecutive |
2674 // to the successor even if they are consecutive |
3100 } else { |
3122 } else { |
3101 __ move(result, rlock_result(x)); |
3123 __ move(result, rlock_result(x)); |
3102 } |
3124 } |
3103 } |
3125 } |
3104 |
3126 |
|
3127 void LIRGenerator::do_Assert(Assert *x) { |
|
3128 #ifdef ASSERT |
|
3129 ValueTag tag = x->x()->type()->tag(); |
|
3130 If::Condition cond = x->cond(); |
|
3131 |
|
3132 LIRItem xitem(x->x(), this); |
|
3133 LIRItem yitem(x->y(), this); |
|
3134 LIRItem* xin = &xitem; |
|
3135 LIRItem* yin = &yitem; |
|
3136 |
|
3137 assert(tag == intTag, "Only integer assertions are valid!"); |
|
3138 |
|
3139 xin->load_item(); |
|
3140 yin->dont_load_item(); |
|
3141 |
|
3142 set_no_result(x); |
|
3143 |
|
3144 LIR_Opr left = xin->result(); |
|
3145 LIR_Opr right = yin->result(); |
|
3146 |
|
3147 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true); |
|
3148 #endif |
|
3149 } |
|
3150 |
|
3151 |
|
// Emit code for a RangeCheckPredicate produced by C1's range check
// elimination.  The predicate encodes the condition under which the
// speculation FAILS: if the condition holds at runtime, execution jumps to
// a PredicateFailedStub, which deoptimizes.  Three cases are handled:
//   1. no operands (or StressRangeCheckElimination): unconditional deopt;
//   2. both operands are integer constants: fold the comparison at compile
//      time and emit an unconditional jump only if the fail-condition holds;
//   3. general case: emit a compare and a conditional branch to the stub.
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {

  Instruction *a = x->x();
  Instruction *b = x->y();
  if (!a || StressRangeCheckElimination) {
    // A null first operand means "always deoptimize" (also forced under
    // StressRangeCheckElimination).  Both operands must be null together.
    assert(!b || StressRangeCheckElimination, "B must also be null");

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    // Both operands are compile-time integer constants: evaluate the
    // fail-condition now instead of emitting a runtime compare.
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();

    bool ok = false;

    switch(x->cond()) {
      case Instruction::eql: ok = (a_int == b_int); break;
      case Instruction::neq: ok = (a_int != b_int); break;
      case Instruction::lss: ok = (a_int < b_int); break;
      case Instruction::leq: ok = (a_int <= b_int); break;
      case Instruction::gtr: ok = (a_int > b_int); break;
      case Instruction::geq: ok = (a_int >= b_int); break;
      // aeq/beq are the unsigned ("above-or-equal"/"below-or-equal")
      // comparisons used for combined bounds checks.
      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
      default: ShouldNotReachHere();
    }

    // `ok` == the fail-condition holds, so the predicate is statically
    // known to fail: jump straight to the deopt stub.
    if (ok) {

      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);

      __ jump(stub);
    }
  } else {
    // General case: runtime compare-and-branch to the deopt stub.

    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;

    // Range check predicates only arise from integer index arithmetic.
    assert(tag == intTag, "Only integer deoptimizations are valid!");

    // Left operand in a register; right may stay a constant/stack item.
    xin->load_item();
    yin->dont_load_item();
    // The predicate itself produces no value.
    set_no_result(x);

    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    // Branch to the deopt stub when the fail-condition holds.
    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), right->type(), stub);
  }
}
|
3214 |
|
3215 |
3105 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { |
3216 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { |
3106 LIRItemList args(1); |
3217 LIRItemList args(1); |
3107 LIRItem value(arg1, this); |
3218 LIRItem value(arg1, this); |
3108 args.append(&value); |
3219 args.append(&value); |
3109 BasicTypeList signature; |
3220 BasicTypeList signature; |