hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
changeset 4013 b154310845de
       
/*
 * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
#include "incls/_os_linux_zero.cpp.incl"
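
// Approximate the current stack pointer by taking the address of a
// stack-allocated local; this is close enough for the uses Zero has
// for it.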
       
address os::current_stack_pointer() {
  address dummy = (address) &dummy;
  return dummy;
}
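
// Zero does not walk native C frames, so there is no sender to
// recover here; reaching this stub indicates a bug.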
       
frame os::get_sender_for_C_frame(frame* fr) {
  ShouldNotCallThis();
}

frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef SPARC
  // On SPARC, 0 != %hi(any real address), because there is no
  // allocation in the first 1Kb of the virtual address space.
  return (char *) 0;
#else
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
#endif // SPARC
}

void os::initialize_thread() {
  // Nothing to do.
}
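
// Zero keeps the interpreter's state on the thread's Zero stack
// rather than in the machine registers captured in a ucontext, so
// nothing should ever need to recover a pc or frame from a signal
// context.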
       
address os::Linux::ucontext_get_pc(ucontext_t* uc) {
  ShouldNotCallThis();
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp,
                                        intptr_t** ret_fp) {
  ShouldNotCallThis();
}

frame os::fetch_frame_from_context(void* ucVoid) {
  ShouldNotCallThis();
}
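
// Dispatcher for signals caught by the VM's handlers (see
// os_linux.cpp).  Returns true if the signal was recognised and
// handled; otherwise it returns false when abort_if_unrecognized is
// not set, and aborts the VM when it is.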
       
extern "C" int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon for JNI code to use signal/sigset to
  // install and then restore certain signal handlers (e.g. to
  // temporarily block SIGPIPE, or to have a SIGILL handler when
  // detecting the CPU type).  When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid.
  // To avoid an unnecessary crash when libjsig is not preloaded, try
  // to handle signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }

  if (info != NULL && thread != NULL) {
    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          ShouldNotCallThis();
        }
        else if (thread->in_stack_red_zone(addr)) {
          thread->disable_stack_red_zone();
          ShouldNotCallThis();
        }
        else {
          // Accessing stack address below sp may cause SEGV if
          // current thread has MAP_GROWSDOWN stack. This should
          // only happen when current thread was created by user
          // code with MAP_GROWSDOWN flag and then attached to VM.
          // See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          }
          else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    /*if (thread->thread_state() == _thread_in_Java) {
      ShouldNotCallThis();
    }
    else*/ if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      ShouldNotCallThis();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
    // kicks in and the heap gets shrunk before the field access.
    /*if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }*/

    // Check to see if we caught the safepoint code in the process
    // of write protecting the memory serialization page.  It write
    // enables the page immediately after protecting it so we can
    // just return to retry the write.
    if (sig == SIGSEGV &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

#ifndef PRODUCT
  if (sig == SIGSEGV) {
    fatal("\n#"
          "\n#    /--------------------\\"
          "\n#    | segmentation fault |"
          "\n#    \\---\\ /--------------/"
          "\n#        /"
          "\n#    [-]        |\\_/|    "
          "\n#    (+)=C      |o o|__  "
          "\n#    | |        =-*-=__\\ "
          "\n#    OOO        c_c_(___)");
  }
#endif // !PRODUCT

  const char *fmt = "caught unhandled signal %d";
  char buf[64];

  sprintf(buf, fmt, sig);
  fatal(buf);
}
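
// Zero leaves per-thread FPU state to the C runtime:
// init_thread_fpu_state() has nothing to do, and the control word
// accessors below are never expected to be reached.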
       
void os::Linux::init_thread_fpu_state(void) {
  // Nothing to do
}

int os::Linux::get_fpu_control_word() {
  ShouldNotCallThis();
}

void os::Linux::set_fpu_control_word(int fpu) {
  ShouldNotCallThis();
}

bool os::is_allocatable(size_t bytes) {
  ShouldNotCallThis();
}

///////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 64 * K;

bool os::Linux::supports_variable_stack_size() {
  return true;
}

size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
#ifdef _LP64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // _LP64
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Only enable glibc guard pages for non-Java threads
  // (Java threads have HotSpot guard pages)
  return (thr_type == java_thread ? 0 : page_size());
}
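
// Compute the bottom and usable size of the current thread's stack as
// reported by pthread, with any glibc guard pages trimmed off and the
// initial thread's growable stack capped at a sensible size.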
       
static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal1("pthread_getattr_np failed with errno = %d", res);
    }
  }

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal1("pthread_attr_getstack failed with errno = %d", res);
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal1("pthread_attr_getguardsize failed with errno = %d", res);
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be given what is currently mapped.  This can be huge, so we cap it.
  if (os::Linux::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }

  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}
       
address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return bottom + size;
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream* st, void* context) {
  ShouldNotCallThis();
}

/////////////////////////////////////////////////////////////////////////////
// Stubs for things that would be in linux_zero.s if it existed.
// You probably want to disassemble these monkeys to check they're ok.

extern "C" {
  int SpinPause() {
    // Nothing to spin-pause with in a pure C++ port; return zero so
    // this non-void function has a defined result.
    return 0;
  }
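
  // SafeFetch32/SafeFetchN are meant to return errValue if reading
  // *adr faults; with no assembly stubs, Zero simply performs a plain
  // load, so callers must only pass addresses that are known to be
  // readable.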
       
  int SafeFetch32(int *adr, int errValue) {
    int value = errValue;
    value = *adr;
    return value;
  }
  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
    intptr_t value = errValue;
    value = *adr;
    return value;
  }
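
  // The conjoint copies below cope with overlapping ranges: they copy
  // forwards when the source lies above the destination and backwards
  // when it lies below.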
       
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
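
  // jlongs are copied one element at a time through os::atomic_copy64
  // so that a 64-bit value is never observed half-written on 32-bit
  // platforms.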
       
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }
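
  // The arrayof variants are not expected to be reached on Zero,
  // hence the ShouldNotCallThis() stubs.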
       
  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    ShouldNotCallThis();
  }
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    ShouldNotCallThis();
  }
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    ShouldNotCallThis();
  }
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    ShouldNotCallThis();
  }
};
       
/////////////////////////////////////////////////////////////////////////////
// Implementations of atomic operations not supported by processors.
//  -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html

#ifndef _LP64
extern "C" {
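  // When a 32-bit target has no native 8-byte compare-and-swap, GCC
  // emits calls to this out-of-line routine for the corresponding
  // __sync builtin; Zero does not expect it ever to be needed.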
       
  long long unsigned int __sync_val_compare_and_swap_8(
    volatile void *ptr,
    long long unsigned int oldval,
    long long unsigned int newval) {
    ShouldNotCallThis();
  }
};
#endif // !_LP64