7083786: remove various dead chunks of code
author never
Wed, 31 Aug 2011 16:46:11 -0700
changeset 10508 233d2e7c462d
parent 10507 4b1c5c1cf1b8
child 10509 43d670e5701e
7083786: remove various dead chunks of code
Summary: delete unused C1/C2 code: incomingReceiverOpr and monitorexit in the LIR assembler, the jvmti_exception_throw stub and Runtime1::post_jvmti_exception_throw, the nmethod has_debug_info flag and its ciEnv::register_method parameter, MemMoveNode, JavaThread::_exception_stack_size, and assorted #if 0 blocks and stale comments.
Reviewed-by: iveresov, kvn
hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
hotspot/src/cpu/sparc/vm/frame_sparc.hpp
hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp
hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp
hotspot/src/share/vm/c1/c1_Compilation.cpp
hotspot/src/share/vm/c1/c1_LIRAssembler.hpp
hotspot/src/share/vm/c1/c1_Runtime1.cpp
hotspot/src/share/vm/c1/c1_Runtime1.hpp
hotspot/src/share/vm/ci/ciConstant.hpp
hotspot/src/share/vm/ci/ciEnv.cpp
hotspot/src/share/vm/ci/ciEnv.hpp
hotspot/src/share/vm/ci/ciField.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/nmethod.hpp
hotspot/src/share/vm/oops/constMethodKlass.cpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/connode.hpp
hotspot/src/share/vm/opto/parse2.cpp
hotspot/src/share/vm/opto/runtime.cpp
hotspot/src/share/vm/prims/forte.cpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -142,11 +142,6 @@
 }
 
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return FrameMap::I0_oop_opr;
-}
-
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::I0_opr;
 }
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -782,13 +782,6 @@
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // Oexception : exception
-        __ set_info("jvmti_exception_throw", dont_gc_arguments);
-        oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // O0: object
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -259,13 +259,8 @@
   };
 #endif /* CC_INTERP */
 
-  // the compiler frame has many of the same fields as the interpreter frame
-  // %%%%% factor out declarations of the shared fields
   enum compiler_frame_fixed_locals {
-       compiler_frame_d_scratch_fp_offset          = -2,
-       compiler_frame_vm_locals_fp_offset          = -2, // should be same as above
-
-       compiler_frame_vm_local_words = -compiler_frame_vm_locals_fp_offset
+       compiler_frame_vm_locals_fp_offset          = -2
   };
 
  private:
@@ -283,9 +278,6 @@
 
   inline void interpreter_frame_set_tos_address(intptr_t* x);
 
-
-  // %%%%% Another idea: instead of defining 3 fns per item, just define one returning a ref
-
   // monitors:
 
   // next two fns read and write Lmonitors value,
@@ -298,22 +290,8 @@
     return ((interpreterState)sp_at(interpreter_state_ptr_offset));
   }
 
-
 #endif /* CC_INTERP */
 
-
-
- // Compiled frames
-
  public:
-  // Tells if this register can hold 64 bits on V9 (really, V8+).
-  static bool holds_a_doubleword(Register reg) {
-#ifdef _LP64
-    //    return true;
-    return reg->is_out() || reg->is_global();
-#else
-    return reg->is_out() || reg->is_global();
-#endif
-  }
 
 #endif // CPU_SPARC_VM_FRAME_SPARC_HPP
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -129,10 +129,6 @@
   return FrameMap::receiver_opr;
 }
 
-LIR_Opr LIR_Assembler::incomingReceiverOpr() {
-  return receiverOpr();
-}
-
 LIR_Opr LIR_Assembler::osrBufferPointer() {
   return FrameMap::as_pointer_opr(receiverOpr()->as_register());
 }
@@ -371,55 +367,6 @@
 }
 
 
-void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
-  if (exception->is_valid()) {
-    // preserve exception
-    // note: the monitor_exit runtime call is a leaf routine
-    //       and cannot block => no GC can happen
-    // The slow case (MonitorAccessStub) uses the first two stack slots
-    // ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
-    __ movptr (Address(rsp, 2*wordSize), exception);
-  }
-
-  Register obj_reg  = obj_opr->as_register();
-  Register lock_reg = lock_opr->as_register();
-
-  // setup registers (lock_reg must be rax, for lock_object)
-  assert(obj_reg != SYNC_header && lock_reg != SYNC_header, "rax, must be available here");
-  Register hdr = lock_reg;
-  assert(new_hdr == SYNC_header, "wrong register");
-  lock_reg = new_hdr;
-  // compute pointer to BasicLock
-  Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
-  __ lea(lock_reg, lock_addr);
-  // unlock object
-  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
-  // _slow_case_stubs->append(slow_case);
-  // temporary fix: must be created after exceptionhandler, therefore as call stub
-  _slow_case_stubs->append(slow_case);
-  if (UseFastLocking) {
-    // try inlined fast unlocking first, revert to slow locking if it fails
-    // note: lock_reg points to the displaced header since the displaced header offset is 0!
-    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
-    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
-  } else {
-    // always do slow unlocking
-    // note: the slow unlocking code could be inlined here, however if we use
-    //       slow unlocking, speed doesn't matter anyway and this solution is
-    //       simpler and requires less duplicated code - additionally, the
-    //       slow unlocking code is the same in either case which simplifies
-    //       debugging
-    __ jmp(*slow_case->entry());
-  }
-  // done
-  __ bind(*slow_case->continuation());
-
-  if (exception->is_valid()) {
-    // restore exception
-    __ movptr (exception, Address(rsp, 2 * wordSize));
-  }
-}
-
 // This specifies the rsp decrement needed to build the frame
 int LIR_Assembler::initial_frame_size_in_bytes() {
   // if rounding, must let FrameMap know!
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -29,8 +29,6 @@
 
   Address::ScaleFactor array_element_size(BasicType type) const;
 
-  void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception);
-
   void arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack);
 
   // helper functions which checks for overflow and sets bailout if it
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -1465,19 +1465,6 @@
       }
       break;
 
-    case jvmti_exception_throw_id:
-      { // rax,: exception oop
-        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
-        // Preserve all registers across this potentially blocking call
-        const int num_rt_args = 2;  // thread, exception oop
-        OopMap* map = save_live_registers(sasm, num_rt_args);
-        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
-        oop_maps = new OopMapSet();
-        oop_maps->add_gc_map(call_offset, map);
-        restore_live_registers(sasm);
-      }
-      break;
-
     case dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -346,7 +346,6 @@
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    true,
     has_unsafe_access()
   );
 }
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -133,7 +133,6 @@
   static bool is_small_constant(LIR_Opr opr);
 
   static LIR_Opr receiverOpr();
-  static LIR_Opr incomingReceiverOpr();
   static LIR_Opr osrBufferPointer();
 
   // stubs
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -375,14 +375,6 @@
 JRT_END
 
 
-JRT_ENTRY(void, Runtime1::post_jvmti_exception_throw(JavaThread* thread))
-  if (JvmtiExport::can_post_on_exceptions()) {
-    vframeStream vfst(thread, true);
-    address bcp = vfst.method()->bcp_from(vfst.bci());
-    JvmtiExport::post_exception_throw(thread, vfst.method(), bcp, thread->exception_oop());
-  }
-JRT_END
-
 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 // associated with the top activation record. The inlinee (that is possibly included in the enclosing
 // method) method oop is passed as an argument. In order to do that it is embedded in the code as
--- a/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -65,7 +65,6 @@
   stub(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
   stub(access_field_patching)        \
   stub(load_klass_patching)          \
-  stub(jvmti_exception_throw)        \
   stub(g1_pre_barrier_slow)          \
   stub(g1_post_barrier_slow)         \
   stub(fpu2long_stub)                \
@@ -141,7 +140,6 @@
   static void unimplemented_entry   (JavaThread* thread, StubID id);
 
   static address exception_handler_for_pc(JavaThread* thread);
-  static void post_jvmti_exception_throw(JavaThread* thread);
 
   static void throw_range_check_exception(JavaThread* thread, int index);
   static void throw_index_exception(JavaThread* thread, int index);
--- a/hotspot/src/share/vm/ci/ciConstant.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciConstant.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -46,9 +46,6 @@
     ciObject* _object;
   } _value;
 
-  // Implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
 
   ciConstant() {
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -949,7 +949,6 @@
                             ImplicitExceptionTable* inc_table,
                             AbstractCompiler* compiler,
                             int comp_level,
-                            bool has_debug_info,
                             bool has_unsafe_access) {
   VM_ENTRY_MARK;
   nmethod* nm = NULL;
@@ -1044,7 +1043,6 @@
         CompileBroker::handle_full_code_cache();
       }
     } else {
-      NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
       nm->set_has_unsafe_access(has_unsafe_access);
 
       // Record successful registration.
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -317,8 +317,7 @@
                        ImplicitExceptionTable*   inc_table,
                        AbstractCompiler*         compiler,
                        int                       comp_level,
-                       bool                      has_debug_info = true,
-                       bool                      has_unsafe_access = false);
+                       bool                      has_unsafe_access);
 
 
   // Access to certain well known ciObjects.
--- a/hotspot/src/share/vm/ci/ciField.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/ci/ciField.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -64,9 +64,6 @@
   // shared constructor code
   void initialize_from(fieldDescriptor* fd);
 
-  // The implementation of the print method.
-  void print_impl(outputStream* st);
-
 public:
   ciFlags flags() { return _flags; }
 
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -451,7 +451,6 @@
   _stack_traversal_mark       = 0;
   _unload_reported            = false;           // jvmti state
 
-  NOT_PRODUCT(_has_debug_info = false);
 #ifdef ASSERT
   _oops_are_stale             = false;
 #endif
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -191,8 +191,6 @@
 
   jbyte _scavenge_root_state;
 
-  NOT_PRODUCT(bool _has_debug_info; )
-
   // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
   // and is not made into a zombie. However, once the nmethod is made into
   // a zombie, it will be locked one final time if CompiledMethodUnload
@@ -329,11 +327,6 @@
   methodOop method() const                        { return _method; }
   AbstractCompiler* compiler() const              { return _compiler; }
 
-#ifndef PRODUCT
-  bool has_debug_info() const                     { return _has_debug_info; }
-  void set_has_debug_info(bool f)                 { _has_debug_info = false; }
-#endif // NOT PRODUCT
-
   // type info
   bool is_nmethod() const                         { return true; }
   bool is_java_method() const                     { return !method()->is_native(); }
--- a/hotspot/src/share/vm/oops/constMethodKlass.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/oops/constMethodKlass.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -172,11 +172,6 @@
 int constMethodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert(obj->is_constMethod(), "should be constMethod");
   constMethodOop cm_oop = constMethodOop(obj);
-#if 0
-  PSParallelCompact::adjust_pointer(cm_oop->adr_method());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_exception_table());
-  PSParallelCompact::adjust_pointer(cm_oop->adr_stackmap_data());
-#endif
   oop* const beg_oop = cm_oop->oop_block_beg();
   oop* const end_oop = cm_oop->oop_block_end();
   for (oop* cur_oop = beg_oop; cur_oop < end_oop; ++cur_oop) {
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -817,7 +817,6 @@
                            &_handler_table, &_inc_table,
                            compiler,
                            env()->comp_level(),
-                           true, /*has_debug_info*/
                            has_unsafe_access()
                            );
   }
--- a/hotspot/src/share/vm/opto/connode.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/opto/connode.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -496,14 +496,6 @@
   virtual bool depends_only_on_test() const { return false; }
 };
 
-//------------------------------MemMoveNode------------------------------------
-// Memory to memory move.  Inserted very late, after allocation.
-class MemMoveNode : public Node {
-public:
-  MemMoveNode( Node *dst, Node *src ) : Node(0,dst,src) {}
-  virtual int Opcode() const;
-};
-
 //------------------------------ThreadLocalNode--------------------------------
 // Ideal Node which returns the base of ThreadLocalStorage.
 class ThreadLocalNode : public Node {
--- a/hotspot/src/share/vm/opto/parse2.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -752,20 +752,12 @@
 // Handle ret bytecode
 void Parse::do_ret() {
   // Find to whom we return.
-#if 0 // %%%% MAKE THIS WORK
-  Node* con = local();
-  const TypePtr* tp = con->bottom_type()->isa_ptr();
-  assert(tp && tp->singleton(), "");
-  int return_bci = (int) tp->get_con();
-  merge(return_bci);
-#else
   assert(block()->num_successors() == 1, "a ret can only go one place now");
   Block* target = block()->successor_at(0);
   assert(!target->is_ready(), "our arrival must be expected");
   profile_ret(target->flow()->start());
   int pnum = target->next_path_num();
   merge_common(target, pnum);
-#endif
 }
 
 //--------------------------dynamic_branch_prediction--------------------------
--- a/hotspot/src/share/vm/opto/runtime.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -978,7 +978,6 @@
 
     thread->set_exception_pc(pc);
     thread->set_exception_handler_pc(handler_address);
-    thread->set_exception_stack_size(0);
 
     // Check if the exception PC is a MethodHandle call site.
     thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
--- a/hotspot/src/share/vm/prims/forte.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/prims/forte.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -522,25 +522,6 @@
 extern "C" {
 JNIEXPORT
 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext) {
-
-// This is if'd out because we no longer use thread suspension.
-// However if someone wanted to backport this to a 5.0 jvm then this
-// code would be important.
-#if 0
-  if (SafepointSynchronize::is_synchronizing()) {
-    // The safepoint mechanism is trying to synchronize all the threads.
-    // Since this can involve thread suspension, it is not safe for us
-    // to be here. We can reduce the deadlock risk window by quickly
-    // returning to the SIGPROF handler. However, it is still possible
-    // for VMThread to catch us here or in the SIGPROF handler. If we
-    // are suspended while holding a resource and another thread blocks
-    // on that resource in the SIGPROF handler, then we will have a
-    // three-thread deadlock (VMThread, this thread, the other thread).
-    trace->num_frames = ticks_safepoint; // -10
-    return;
-  }
-#endif
-
   JavaThread* thread;
 
   if (trace->env_id == NULL ||
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Aug 31 16:46:11 2011 -0700
@@ -1272,7 +1272,6 @@
   _exception_oop = NULL;
   _exception_pc  = 0;
   _exception_handler_pc = 0;
-  _exception_stack_size = 0;
   _is_method_handle_return = 0;
   _jvmti_thread_state= NULL;
   _should_post_on_exceptions_flag = JNI_FALSE;
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Aug 31 09:48:21 2011 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Wed Aug 31 16:46:11 2011 -0700
@@ -841,7 +841,6 @@
   volatile oop     _exception_oop;               // Exception thrown in compiled code
   volatile address _exception_pc;                // PC where exception happened
   volatile address _exception_handler_pc;        // PC for handler of exception
-  volatile int     _exception_stack_size;        // Size of frame where exception happened
   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
   // support for compilation
@@ -1182,7 +1181,6 @@
 
   // Exception handling for compiled methods
   oop      exception_oop() const                 { return _exception_oop; }
-  int      exception_stack_size() const          { return _exception_stack_size; }
   address  exception_pc() const                  { return _exception_pc; }
   address  exception_handler_pc() const          { return _exception_handler_pc; }
   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
@@ -1190,7 +1188,6 @@
   void set_exception_oop(oop o)                  { _exception_oop = o; }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
-  void set_exception_stack_size(int size)        { _exception_stack_size = size; }
   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
   // Stack overflow support
@@ -1264,7 +1261,6 @@
   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
-  static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }