hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp
changeset 10565 dc90c239f4ec
       
/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
#include <pthread.h>
#include <pthread_np.h> /* For pthread_attr_get_np */
#endif

// no precompiled headers
#include "assembler_zero.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "mutex_bsd.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

address os::current_stack_pointer() {
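  // Taking the address of a local variable gives a close-enough
  // approximation of the current stack pointer for Zero's purposes.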
       
  address dummy = (address) &dummy;
  return dummy;
}

frame os::get_sender_for_C_frame(frame* fr) {
  ShouldNotCallThis();
}

frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef SPARC
  // On SPARC, 0 != %hi(any real address), because there is no
  // allocation in the first 1Kb of the virtual address space.
  return (char *) 0;
#else
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
#endif // SPARC
}

void os::initialize_thread() {
  // Nothing to do.
}

address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
  ShouldNotCallThis();
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp,
                                        intptr_t** ret_fp) {
  ShouldNotCallThis();
}

frame os::fetch_frame_from_context(void* ucVoid) {
  ShouldNotCallThis();
}

extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
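  // Returns true if the signal was recognized and handled; returns
  // false (or aborts, if abort_if_unrecognized is set) otherwise.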
       
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon for JNI code to use signal/sigset to
  // install and then restore a signal handler (e.g. to temporarily
  // block SIGPIPE, or to have a SIGILL handler when detecting CPU
  // type). When that happens, JVM_handle_bsd_signal() might be
  // invoked with junk info/ucVoid. To avoid an unnecessary crash
  // when libjsig is not preloaded, try to handle signals that do not
  // require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Bsd::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Bsd::signal_handlers_are_installed) {
    if (t != NULL ){
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if(t->is_VM_thread()){
        vmthread = (VMThread *)t;
      }
    }
  }

  if (info != NULL && thread != NULL) {
    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          ShouldNotCallThis();
        }
        else if (thread->in_stack_red_zone(addr)) {
          thread->disable_stack_red_zone();
          ShouldNotCallThis();
        }
#ifndef _ALLBSD_SOURCE
        else {
          // Accessing stack address below sp may cause SEGV if
          // current thread has MAP_GROWSDOWN stack. This should
          // only happen when current thread was created by user
          // code with MAP_GROWSDOWN flag and then attached to VM.
          // See notes in os_bsd.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Bsd::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          }
          else {
            fatal("recursive segv. expanding stack.");
          }
        }
#endif
      }
    }

    /*if (thread->thread_state() == _thread_in_Java) {
      ShouldNotCallThis();
    }
    else*/ if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      ShouldNotCallThis();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
    // kicks in and the heap gets shrunk before the field access.
    /*if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }*/

    // Check to see if we caught the safepoint code in the process
    // of write protecting the memory serialization page.  It write
    // enables the page immediately after protecting it so we can
    // just return to retry the write.
    if (sig == SIGSEGV &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // signal-chaining
  if (os::Bsd::chained_handler(sig, info, ucVoid)) {
     return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

#ifndef PRODUCT
  if (sig == SIGSEGV) {
    fatal("\n#"
          "\n#    /--------------------\\"
          "\n#    | segmentation fault |"
          "\n#    \\---\\ /--------------/"
          "\n#        /"
          "\n#    [-]        |\\_/|    "
          "\n#    (+)=C      |o o|__  "
          "\n#    | |        =-*-=__\\ "
          "\n#    OOO        c_c_(___)");
  }
#endif // !PRODUCT

  const char *fmt = "caught unhandled signal %d";
  char buf[64];

  sprintf(buf, fmt, sig);
  fatal(buf);
}

void os::Bsd::init_thread_fpu_state(void) {
  // Nothing to do
}

#ifndef _ALLBSD_SOURCE
int os::Bsd::get_fpu_control_word() {
  ShouldNotCallThis();
}

void os::Bsd::set_fpu_control_word(int fpu) {
  ShouldNotCallThis();
}
#endif

bool os::is_allocatable(size_t bytes) {
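  // On 64-bit platforms any size is considered allocatable; on 32-bit
  // platforms, requests of 2GB or more are probed with a trial
  // reservation that is released immediately.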
       
#ifdef _LP64
  return true;
#else
  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // _LP64
}

///////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Bsd::min_stack_allowed = 64 * K;

bool os::Bsd::supports_variable_stack_size() {
  return true;
}

size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
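  // Compiler threads get a larger default stack than other threads.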
       
#ifdef _LP64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // _LP64
  return s;
}

size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
  // Only enable glibc guard pages for non-Java threads
  // (Java threads have HotSpot guard pages)
  return (thr_type == java_thread ? 0 : page_size());
}

static void current_stack_region(address *bottom, size_t *size) {
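  // Work out the extent of the current thread's stack, using whichever
  // pthread interface this platform provides.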
       
  address stack_bottom;
  address stack_top;
  size_t stack_bytes;

#ifdef __APPLE__
  pthread_t self = pthread_self();
  stack_top = (address) pthread_get_stackaddr_np(self);
  stack_bytes = pthread_get_stacksize_np(self);
  stack_bottom = stack_top - stack_bytes;
#elif defined(__OpenBSD__)
  stack_t ss;
  int rslt = pthread_stackseg_np(pthread_self(), &ss);

  if (rslt != 0)
    fatal(err_msg("pthread_stackseg_np failed with err = %d", rslt));

  stack_top = (address) ss.ss_sp;
  stack_bytes  = ss.ss_size;
  stack_bottom = stack_top - stack_bytes;
#elif defined(_ALLBSD_SOURCE)
  pthread_attr_t attr;

  int rslt = pthread_attr_init(&attr);

  // JVM needs to know exact stack location, abort if it fails
  if (rslt != 0)
    fatal(err_msg("pthread_attr_init failed with err = %d", rslt));

  rslt = pthread_attr_get_np(pthread_self(), &attr);

  if (rslt != 0)
    fatal(err_msg("pthread_attr_get_np failed with err = %d", rslt));

  if (pthread_attr_getstackaddr(&attr, (void **) &stack_bottom) != 0 ||
      pthread_attr_getstacksize(&attr, &stack_bytes) != 0) {
    fatal("Can not locate current stack attributes!");
  }

  pthread_attr_destroy(&attr);

  stack_top = stack_bottom + stack_bytes;
#else /* Linux */
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
    }
  }

  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
  }
  stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Bsd::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be, given what is currently mapped.  This can be huge, so we cap it.
  if (os::Bsd::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }
#endif

  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return bottom + size;
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream* st, void* context) {
  ShouldNotCallThis();
}

void os::print_register_info(outputStream *st, void *context) {
  ShouldNotCallThis();
}

/////////////////////////////////////////////////////////////////////////////
// Stubs for things that would be in bsd_zero.s if it existed.
// You probably want to disassemble these monkeys to check they're ok.

extern "C" {
  int SpinPause() {
    // Zero has no pause instruction; return a value rather than
    // falling off the end of a value-returning function.
    return 1;
  }

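  // Note: SafeFetch32 and SafeFetchN below are plain loads; the
  // errValue argument is never actually returned, so a fault on an
  // unreadable address is not intercepted here.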
       
  int SafeFetch32(int *adr, int errValue) {
    int value = errValue;
    value = *adr;
    return value;
  }
  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
    intptr_t value = errValue;
    value = *adr;
    return value;
  }

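  // Conjoint (possibly overlapping) element copies: copy forwards when
  // the source starts above the destination and backwards otherwise, so
  // no element is overwritten before it has been read.  The jlong
  // variant uses os::atomic_copy64() so each 64-bit element is copied
  // atomically.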
       
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }

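  // The arrayof variants copy arrays of primitives; counts are element
  // counts, so they are scaled to byte counts for memmove().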
       
  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};

/////////////////////////////////////////////////////////////////////////////
// Implementations of atomic operations not supported by processors.
//  -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html

#ifndef _LP64
extern "C" {
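  // GCC can emit calls to this builtin for 64-bit compare-and-swap on
  // 32-bit targets; it is not expected to be reached here, so any call
  // aborts the VM.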
       
  long long unsigned int __sync_val_compare_and_swap_8(
    volatile void *ptr,
    long long unsigned int oldval,
    long long unsigned int newval) {
    ShouldNotCallThis();
  }
};
#endif // !_LP64