[JIT] backport: flush OSR nmethods in code cache
Summary:
8023191: OSR nmethods should be flushed to free space in CodeCache
8152947: VM crash with assert(!removed || is_in_use()) failed: unused
         osr nmethod should be invalidated
origin bugs: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8023191
             https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8152947
review urls: http://cr.openjdk.java.net/~thartmann/8023191/webrev.01/
             http://cr.openjdk.java.net/~thartmann/8152947/webrev.01/

Test Plan: jtreg tests

Reviewers: yueshi.zwj, zhuoren.wz

Issue: dragonwell-project/dragonwell8#284

CR:
kuaiwei committed Jan 11, 2022
1 parent 367a8e9 commit ddbaf49
Showing 6 changed files with 73 additions and 47 deletions.
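
A minimal standalone C++ sketch of the contract the nmethod.cpp hunks below enforce: an OSR nmethod is unlinked from its holder's OSR list only while it is still in_use, and debug builds then check that a second removal finds nothing. The types and members used here (SimpleMethodHolder, SimpleOsrNmethod, transition) are hypothetical simplifications for illustration, not HotSpot code.

#include <algorithm>
#include <cassert>
#include <vector>

enum class State { in_use, not_entrant, zombie, unloaded };

struct SimpleOsrNmethod;

// Stands in for the per-klass list that InstanceKlass keeps for OSR nmethods.
struct SimpleMethodHolder {
  std::vector<SimpleOsrNmethod*> osr_list;

  // Returns true if n was on the list and has been removed, mirroring the
  // new bool return value of InstanceKlass::remove_osr_nmethod().
  bool remove_osr_nmethod(SimpleOsrNmethod* n) {
    auto it = std::find(osr_list.begin(), osr_list.end(), n);
    if (it == osr_list.end()) return false;
    osr_list.erase(it);
    return true;
  }
};

struct SimpleOsrNmethod {
  SimpleMethodHolder* holder;
  State state;

  bool is_in_use() const { return state == State::in_use; }

  // Analogous to invalidate_osr_method(): unlink from the holder's list.
  void invalidate() { holder->remove_osr_nmethod(this); }

  // Analogous to the patched make_unloaded()/make_not_entrant_or_zombie():
  // only the first transition out of in_use unlinks the nmethod.
  void transition(State new_state) {
    if (is_in_use()) {
      invalidate();                       // invalidate the OSR nmethod only once
    }
    state = new_state;
    // Debug-style check from the patch: a second removal must find nothing.
    bool found = holder->remove_osr_nmethod(this);
    assert(!found && "osr nmethod should have been invalidated");
  }
};

int main() {
  SimpleMethodHolder holder;
  SimpleOsrNmethod nm{&holder, State::in_use};
  holder.osr_list.push_back(&nm);

  nm.transition(State::not_entrant);      // unlinks exactly once
  nm.transition(State::zombie);           // no second unlink, assert stays quiet
  return 0;
}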
49 changes: 35 additions & 14 deletions src/share/vm/code/nmethod.cpp
@@ -1362,8 +1362,19 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
}
// Unlink the osr method, so we do not look this up again
if (is_osr_method()) {
invalidate_osr_method();
// Invalidate the osr nmethod only once
if (is_in_use()) {
invalidate_osr_method();
}
#ifdef ASSERT
if (method() != NULL) {
// Make sure osr nmethod is invalidated, i.e. not on the list
bool found = method()->method_holder()->remove_osr_nmethod(this);
assert(!found, "osr nmethod should have been invalidated");
}
#endif
}

// If _method is already NULL the Method* is about to be unloaded,
// so we don't have to break the cycle. Note that it is possible to
// have the Method* live here, in case we unload the nmethod because
@@ -1403,8 +1414,9 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
void nmethod::invalidate_osr_method() {
assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
// Remove from list of active nmethods
if (method() != NULL)
if (method() != NULL) {
method()->method_holder()->remove_osr_nmethod(this);
}
// Set entry as invalid
_entry_bci = InvalidOSREntryBci;
}
@@ -1467,8 +1479,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// invalidate osr nmethod before acquiring the patching lock since
// they both acquire leaf locks and we don't want a deadlock.
// This logic is equivalent to the logic below for patching the
// verified entry point of regular methods.
if (is_osr_method()) {
// verified entry point of regular methods. We check that the
// nmethod is in use to ensure that it is invalidated only once.
if (is_osr_method() && is_in_use()) {
// this effectively makes the osr nmethod not entrant
invalidate_osr_method();
}
@@ -1531,13 +1544,21 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
}
} // leave critical region under Patching_lock

#ifdef ASSERT
if (is_osr_method() && method() != NULL) {
// Make sure osr nmethod is invalidated, i.e. not on the list
bool found = method()->method_holder()->remove_osr_nmethod(this);
assert(!found, "osr nmethod should have been invalidated");
}
#endif

// When the nmethod becomes zombie it is no longer alive so the
// dependencies must be flushed. nmethods in the not_entrant
// state will be flushed later when the transition to zombie
// happens or they get unloaded.
if (state == zombie) {
{
// Flushing dependecies must be done before any possible
// Flushing dependencies must be done before any possible
// safepoint can sneak in, otherwise the oops used by the
// dependency logic could have become stale.
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -1549,7 +1570,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {

// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
// event and it hasn't already been reported for this nmethod then
// report it now. The event may have been reported earilier if the GC
// report it now. The event may have been reported earlier if the GC
// marked it for unloading). JvmtiDeferredEventQueue support means
// we no longer go to a safepoint here.
post_compiled_method_unload();
@@ -1576,17 +1597,20 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {

void nmethod::flush() {
// Note that there are no valid oops in the nmethod anymore.
assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
assert(!is_osr_method() || is_unloaded() || is_zombie(),
"osr nmethod must be unloaded or zombie before flushing");
assert(is_zombie() || is_osr_method(), "must be a zombie method");
assert(is_marked_for_reclamation() || is_osr_method(), "must be marked for reclamation");


assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
assert_locked_or_safepoint(CodeCache_lock);

// completely deallocate this method
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
is_osr_method() ? "osr" : "", _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
}

// We need to deallocate any ExceptionCache data.
@@ -2895,10 +2919,7 @@ void nmethod::print() const {
tty->print("((nmethod*) " INTPTR_FORMAT ") ", this);
tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { ");
if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie ");
if (is_unloaded()) tty->print("unloaded ");
tty->print_cr("%s", state());
if (on_scavenge_root_list()) tty->print("scavenge_root ");
tty->print_cr("}:");
}
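The relaxed assertions in nmethod::flush() above allow an OSR nmethod to be flushed while unloaded or zombie, whereas any other nmethod must still be zombie and marked for reclamation. A small standalone sketch of that predicate, using a hypothetical flag bundle rather than the real nmethod state accessors:

#include <cassert>

// Hypothetical flag bundle for the states nmethod::flush() checks; these are
// not the real nmethod accessors.
struct FlushFlags {
  bool osr;
  bool unloaded;
  bool zombie;
  bool marked_for_reclamation;
};

// Mirrors the shape of the updated asserts: OSR nmethods may be flushed while
// unloaded or zombie, all other nmethods must be zombie and marked for reclamation.
bool may_flush(const FlushFlags& f) {
  if (f.osr) return f.unloaded || f.zombie;
  return f.zombie && f.marked_for_reclamation;
}

int main() {
  assert(may_flush({/*osr*/ true,  /*unloaded*/ true,  /*zombie*/ false, /*marked*/ false}));
  assert(may_flush({/*osr*/ false, /*unloaded*/ false, /*zombie*/ true,  /*marked*/ true}));
  assert(!may_flush({/*osr*/ false, /*unloaded*/ false, /*zombie*/ true, /*marked*/ false}));
  return 0;
}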
17 changes: 15 additions & 2 deletions src/share/vm/code/nmethod.hpp
@@ -195,7 +195,7 @@ class nmethod : public CodeBlob {
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints

// Protected by Patching_lock
volatile unsigned char _state; // {alive, not_entrant, zombie, unloaded}
volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}

volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod

@@ -437,7 +437,20 @@ class nmethod : public CodeBlob {
bool is_alive() const { unsigned char s = _state; return s == in_use || s == not_entrant; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }
bool is_unloaded() const { return _state == unloaded; }

// returns a string version of the nmethod state
const char* state() const {
switch(_state) {
case in_use: return "in use";
case not_entrant: return "not_entrant";
case zombie: return "zombie";
case unloaded: return "unloaded";
default:
fatal("unexpected nmethod state");
return NULL;
}
}

#if INCLUDE_RTM_OPT
// rtm state accessing and manipulating
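The new state() accessor above maps the _state byte to a printable name with a single switch, which is what the updated nmethod::print() uses. A standalone sketch of the same shape, assuming a stand-in enum instead of the real nmethod state constants:

#include <cstdio>
#include <cstdlib>

// Stand-in constants for the nmethod state values; not the HotSpot declarations.
enum nm_state : unsigned char { in_use, not_entrant, zombie, unloaded };

// Same shape as the new nmethod::state() accessor: one switch, one name per state.
const char* state_name(unsigned char s) {
  switch (s) {
    case in_use:      return "in use";
    case not_entrant: return "not_entrant";
    case zombie:      return "zombie";
    case unloaded:    return "unloaded";
    default:
      std::abort();   // stands in for fatal("unexpected nmethod state")
  }
}

int main() {
  // print() can now emit the state with a single call instead of four ifs.
  std::printf("{ %s }\n", state_name(zombie));
  return 0;
}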
7 changes: 5 additions & 2 deletions src/share/vm/oops/instanceKlass.cpp
@@ -2998,8 +2998,8 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
}
}


void InstanceKlass::remove_osr_nmethod(nmethod* n) {
// Remove osr nmethod from the list. Return true if found and removed.
bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
// This is a short non-blocking critical region, so the no safepoint check is ok.
OsrList_lock->lock_without_safepoint_check();
assert(n->is_osr_method(), "wrong kind of nmethod");
@@ -3008,6 +3008,7 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
int max_level = CompLevel_none; // Find the max comp level excluding n
Method* m = n->method();
// Search for match
bool found = false;
while(cur != NULL && cur != n) {
if (TieredCompilation && m == cur->method()) {
// Find max level before n
@@ -3018,6 +3019,7 @@ }
}
nmethod* next = NULL;
if (cur == n) {
found = true;
next = cur->osr_link();
if (last == NULL) {
// Remove first element
@@ -3040,6 +3042,7 @@ }
}
// Remember to unlock again
OsrList_lock->unlock();
return found;
}

int InstanceKlass::mark_osr_nmethods(const Method* m) {
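remove_osr_nmethod() now reports whether the nmethod was actually found on the OSR list, which is what the new ASSERT blocks in nmethod.cpp rely on. A standalone sketch of that list-removal contract, with a hypothetical Node type in place of nmethod and its osr_link():

#include <cassert>

// Hypothetical node type standing in for an nmethod on the per-klass OSR list,
// with next playing the role of osr_link().
struct Node {
  Node* next = nullptr;
};

// Remove n from the singly linked list rooted at *head. Returns true if n was
// found and unlinked, false otherwise: the contract remove_osr_nmethod() now exposes.
bool remove_node(Node** head, Node* n) {
  Node* last = nullptr;
  Node* cur = *head;
  while (cur != nullptr && cur != n) {   // search for a match
    last = cur;
    cur = cur->next;
  }
  if (cur == nullptr) return false;      // n was not on the list
  if (last == nullptr) {
    *head = cur->next;                   // remove first element
  } else {
    last->next = cur->next;
  }
  return true;
}

int main() {
  Node a, b;
  a.next = &b;
  Node* head = &a;

  assert(remove_node(&head, &b));        // first removal succeeds
  assert(!remove_node(&head, &b));       // second removal reports "not found"
  return 0;
}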
2 changes: 1 addition & 1 deletion src/share/vm/oops/instanceKlass.hpp
@@ -865,7 +865,7 @@ class InstanceKlass: public Klass {
nmethod* osr_nmethods_head() const { return _osr_nmethods_head; };
void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
void add_osr_nmethod(nmethod* n);
void remove_osr_nmethod(nmethod* n);
bool remove_osr_nmethod(nmethod* n);
int mark_osr_nmethods(const Method* m);
nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;

36 changes: 13 additions & 23 deletions src/share/vm/runtime/sweeper.cpp
@@ -513,7 +513,7 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
// Clean inline caches that point to zombie/non-entrant methods
// Clean inline caches that point to zombie/non-entrant/unloaded methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
@@ -527,33 +527,25 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
// there are no inline caches that refer to it.
if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
freed_memory = nm->total_size();
if (nm->is_compiled_by_c2()) {
_total_nof_c2_methods_reclaimed++;
}
release_nmethod(nm);
_flushed_count++;
} else {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
}
nm->mark_for_reclamation();
// Keep track of code cache state change
_bytes_changed += nm->total_size();
_marked_for_reclamation_count++;
SWEEP(nm);
assert(nm->is_marked_for_reclamation(), "nmethod must be marked for reclamation");
}
} else if (nm->is_not_entrant()) {
// If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
if (nm->can_convert_to_zombie()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
// Clear ICStubs to prevent back patching stubs of zombie or unloaded
// Clear ICStubs to prevent back patching stubs of zombie or flushed
// nmethods during the next safepoint (see ICStub::finalize).
MutexLocker cl(CompiledIC_lock);
nm->clear_ic_stubs();
@@ -568,9 +560,13 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
SWEEP(nm);
}
} else if (nm->is_unloaded()) {
// Unloaded code, just make it a zombie
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
// Code is unloaded, so there are no activations on the stack.
// Convert the nmethod to zombie or flush it directly in the OSR case.
{
// Clean ICs of unloaded nmethods as well because they may reference other
// unloaded nmethods that may be flushed earlier in the sweeper cycle.
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
if (nm->is_osr_method()) {
SWEEP(nm);
@@ -582,21 +578,15 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
release_nmethod(nm);
_flushed_count++;
} else {
{
// Clean ICs of unloaded nmethods as well because they may reference other
// unloaded nmethods that may be flushed earlier in the sweeper cycle.
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
_zombified_count++;
SWEEP(nm);
}
} else {
if (UseCodeCacheFlushing) {
if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
// Do not make native methods and OSR-methods not-entrant
if (!nm->is_locked_by_vm() && !nm->is_native_method()) {
// Do not make native methods not-entrant
nm->dec_hotness_counter();
// Get the initial value of the hotness counter. This value depends on the
// ReservedCodeCacheSize
@@ -625,7 +615,7 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
}
}
}
// Clean-up all inline caches that point to zombie/non-reentrant methods
// Clean inline caches that point to zombie/non-reentrant/unloaded nmethods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
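The reorganized is_unloaded() branch above cleans inline caches first and then either flushes an unloaded OSR nmethod directly or converts any other unloaded nmethod to a zombie. A compressed standalone sketch of that control flow; the FakeNmethod type and its helpers are hypothetical, not the sweeper's API:

#include <cstdio>

// Hypothetical stand-in for the bits of nmethod state this branch of the
// sweeper looks at; not the real nmethod or NMethodSweeper types.
struct FakeNmethod {
  bool unloaded = false;
  bool osr = false;
  bool zombie = false;
  bool flushed = false;

  void cleanup_inline_caches() { /* drop stale inline-cache targets */ }
  void make_zombie()           { zombie = true; }
  void release()               { flushed = true; }
};

// Mirrors the shape of the patched is_unloaded() branch in process_nmethod():
// clean inline caches unconditionally, then flush OSR nmethods directly and
// convert every other unloaded nmethod to a zombie.
void process_unloaded(FakeNmethod* nm) {
  if (!nm->unloaded) return;

  // Clean ICs of unloaded nmethods as well, because they may reference other
  // unloaded nmethods that get flushed earlier in the same sweeper cycle.
  nm->cleanup_inline_caches();

  if (nm->osr) {
    // An unloaded OSR nmethod has no activations left, so it can be released
    // immediately instead of passing through the zombie state first.
    nm->release();
  } else {
    nm->make_zombie();
  }
}

int main() {
  FakeNmethod osr_nm;   osr_nm.unloaded = true; osr_nm.osr = true;
  FakeNmethod plain_nm; plain_nm.unloaded = true;

  process_unloaded(&osr_nm);
  process_unloaded(&plain_nm);
  std::printf("osr flushed=%d, plain zombie=%d\n", osr_nm.flushed, plain_nm.zombie);
  return 0;
}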
9 changes: 4 additions & 5 deletions src/share/vm/runtime/sweeper.hpp
@@ -41,12 +41,12 @@
// and sweep_code_cache() cannot execute at the same time.
// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
// invalidation, and (iv) being replaced be a different method version (tiered
// compilation). Not-entrant nmethod cannot be called by Java threads, but they
// can still be active on the stack. To ensure that active nmethod are not reclaimed,
// invalidation, and (iv) being replaced by a different method version (tiered
// compilation). Not-entrant nmethods cannot be called by Java threads, but they
// can still be active on the stack. To ensure that active nmethods are not reclaimed,
// we have to wait until the next marking phase has completed. If a not-entrant
// nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
// remove the nmethod, all inline caches (IC) that point to the the nmethod must be
// remove the nmethod, all inline caches (IC) that point to the nmethod must be
// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
// state change happens during separate sweeps. It may take at least 3 sweeps before an
// nmethod's space is freed. Sweeping is currently done by compiler threads between
@@ -96,7 +96,6 @@ class NMethodSweeper : public AllStatic {
static const Tickspan peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
static void log_sweep(const char* msg, const char* format = NULL, ...) ATTRIBUTE_PRINTF(2, 3);


#ifdef ASSERT
static bool is_sweeping(nmethod* which) { return _current == which; }
// Keep track of sweeper activity in the ring buffer
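The sweeper.hpp comment above describes how a not-entrant nmethod becomes zombie, is marked for reclamation, and only then has its space freed, one step per sweep. A toy state machine illustrating that ordering, under the simplifying assumption that each sweep advances the state at most once (the names are hypothetical, not the NMethodSweeper API):

#include <cassert>

// Hypothetical lifecycle states; a real nmethod also tracks stack activations,
// inline-cache cleanup and locking, which are omitted here.
enum class Life { in_use, not_entrant, zombie, marked_for_reclamation, freed };

// One sweeper visit: advance at most one step per sweep, so it takes several
// sweeps before a not-entrant nmethod's space is actually freed.
Life sweep_once(Life s, bool still_on_stack) {
  switch (s) {
    case Life::not_entrant:
      // Only convert to zombie once no activation remains on any stack.
      return still_on_stack ? s : Life::zombie;
    case Life::zombie:
      return Life::marked_for_reclamation;
    case Life::marked_for_reclamation:
      return Life::freed;
    default:
      return s;                          // in_use and freed are left alone
  }
}

int main() {
  Life s = Life::not_entrant;
  s = sweep_once(s, /*still_on_stack=*/true);   // still active: stays not_entrant
  s = sweep_once(s, /*still_on_stack=*/false);  // sweep 1: becomes zombie
  s = sweep_once(s, /*still_on_stack=*/false);  // sweep 2: marked for reclamation
  s = sweep_once(s, /*still_on_stack=*/false);  // sweep 3: space can be freed
  assert(s == Life::freed);
  return 0;
}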
