diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 75baef153f875..a6d037dba554f 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1765,10 +1765,6 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
   // n.b. frame size includes space for return pc and rfp
   const int framesize = C->output()->frame_size_in_bytes();
 
-  // insert a nop at the start of the prolog so we can patch in a
-  // branch if we need to invalidate the method later
-  __ nop();
-
   if (C->clinit_barrier_on_entry()) {
     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
 
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index 33158d6b97a91..6d7c758939e2d 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -212,11 +212,6 @@ void NativeMovRegMem::verify() {
 
 void NativeJump::verify() { ; }
 
-
-void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
-}
-
-
 address NativeJump::jump_destination() const {
   address dest = MacroAssembler::target_addr_for_insn_or_null(instruction_address());
 
@@ -345,10 +340,6 @@ bool NativeInstruction::is_movk() {
   return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
 }
 
-bool NativeInstruction::is_sigill_not_entrant() {
-  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
-}
-
 void NativeIllegalInstruction::insert(address code_pos) {
   *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
 }
@@ -359,31 +350,6 @@ bool NativeInstruction::is_stop() {
 
 //-------------------------------------------------------------------
 
-// MT-safe inserting of a jump over a jump or a nop (used by
-// nmethod::make_not_entrant)
-
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
-         || nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
-         "Aarch64 cannot replace non-jump with jump");
-
-  // Patch this nmethod atomically.
-  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
-    ptrdiff_t disp = dest - verified_entry;
-    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");
-
-    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
-    *(unsigned int*)verified_entry = insn;
-  } else {
-    // We use an illegal instruction for marking a method as not_entrant.
-    NativeIllegalInstruction::insert(verified_entry);
-  }
-
-  ICache::invalidate_range(verified_entry, instruction_size);
-}
-
 void NativeGeneralJump::verify() { }
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
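For reference, the removed AArch64 `patch_verified_entry()` hand-encoded an unconditional `B` instruction: opcode `0b000101` in bits 31..26 plus a word-scaled signed 26-bit offset, which is why the guarantee above bounds `disp` by `1 << 27` (±128 MB). A standalone sketch of that encoding — `encode_b` is a hypothetical helper name for illustration, not HotSpot API:

```cpp
// Standalone sketch (not HotSpot code) of the AArch64 "B imm26" encoding
// used by the removed patch_verified_entry().
#include <cstdint>
#include <cstdio>

uint32_t encode_b(int64_t disp_bytes) {  // hypothetical helper name
  // The displacement must be word aligned and, after scaling by 4,
  // fit in a signed 26-bit field: the +/-128 MB branch range.
  if (disp_bytes % 4 != 0 ||
      disp_bytes >= (1LL << 27) || disp_bytes <= -(1LL << 27)) {
    return 0;  // out of range: the VM fell back to an illegal instruction
  }
  return (0b000101u << 26) | ((uint32_t)(disp_bytes >> 2) & 0x3ffffff);
}

int main() {
  // branch forward 8 bytes: B .+8 -> 0x14000002
  std::printf("0x%08x\n", encode_b(8));
}
```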
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index 0eb5ff815be1d..915e33fe56a7f 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -83,7 +83,6 @@ class NativeInstruction {
   bool is_safepoint_poll();
   bool is_movz();
   bool is_movk();
-  bool is_sigill_not_entrant();
   bool is_stop();
 
 protected:
@@ -360,9 +359,6 @@ class NativeJump: public NativeInstruction {
 
   // Insertion of native jump instruction
   static void insert(address code_pos, address entry);
-
-  // MT-safe insertion of native jump at verified method entry
-  static void check_verified_entry_alignment(address entry, address verified_entry);
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };
 
 inline NativeJump* nativeJump_at(address address) {
diff --git a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
index 4492c9da33e1f..029e5131a846e 100644
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
@@ -193,10 +193,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
   __ bind(guard);
 
   // nmethod guard value. Skipped over in common case.
-  //
-  // Put a debug value to make any offsets skew
-  // clearly visible in coredump
-  __ emit_int32(0xDEADBEAF);
+  __ emit_int32(0); // initial armed value, will be reset later
 
   __ bind(skip);
   __ block_comment("nmethod_barrier end");
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 2caf2d7587e53..232294b246a12 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -282,16 +282,6 @@ void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
   }
 }
 
-void RawNativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
-}
-
-void RawNativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");
-  int *a = (int *)verified_entry;
-  a[0] = not_entrant_illegal_instruction; // always illegal
-  ICache::invalidate_range((address)&a[0], sizeof a[0]);
-}
-
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   int offset = (int)(entry - code_pos - 8);
   assert(offset < 0x2000000 && offset > -0x2000000, "encoding constraint");
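The guard slot changes from a `0xDEADBEAF` debug filler to `0` because `0` is now the `armed` value in the reworked BarrierSetNMethod scheme (see barrierSetNMethod.hpp further down). A minimal model of the entry-barrier fast-path compare under those assumed semantics — `takes_slow_path` and the epoch value are illustrative, not HotSpot code:

```cpp
// Minimal model (assumed semantics, not HotSpot code): a guard of 0 ("armed")
// differs from every disarmed epoch value, so a freshly installed nmethod
// always takes the barrier slow path on first entry.
#include <cstdint>
#include <cstdio>

constexpr int32_t kNotEntrant = INT32_MIN;  // bit 31, reserved sticky flag
constexpr int32_t kArmed      = 0;

bool takes_slow_path(int32_t nmethod_guard, int32_t disarmed_value) {
  return nmethod_guard != disarmed_value;   // the fast-path compare
}

int main() {
  int32_t disarmed = 1;                     // example per-epoch value
  std::printf("fresh (guard=0) -> slow path: %d\n",
              takes_slow_path(kArmed, disarmed));                    // 1
  std::printf("disarmed        -> slow path: %d\n",
              takes_slow_path(disarmed, disarmed));                  // 0
  std::printf("not-entrant     -> slow path: %d\n",
              takes_slow_path(disarmed | kNotEntrant, disarmed));    // 1
}
```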
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index e26c23cd9836c..ee856bcfe6049 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,6 @@ class RawNativeInstruction {
     instr_fld_fst      = 0xd0
   };
 
-  // illegal instruction used by NativeJump::patch_verified_entry
-  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
-  static const int not_entrant_illegal_instruction = 0xe7f000f0;
-
   static int decode_rotated_imm12(int encoding) {
     int base = encoding & 0xff;
     int right_rotation = (encoding & 0xf00) >> 7;
@@ -274,10 +270,6 @@ class RawNativeJump: public NativeInstruction {
     }
   }
 
-  static void check_verified_entry_alignment(address entry, address verified_entry);
-
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
-
 };
 
 inline RawNativeJump* rawNativeJump_at(address address) {
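The dropped `not_entrant_illegal_instruction` constant can be checked against the UDF (permanently undefined) encoding its comment describes; a one-line standalone verification:

```cpp
// Standalone check (not HotSpot code): the removed ARM constant really is
// the UDF bit pattern from its comment -- cond = 0xe, bits 27..20 = 0b1111111,
// bits 7..4 = 0b1111.
static_assert((0xeu << 28 | 0b1111111u << 20 | 0b1111u << 4) == 0xe7f000f0u,
              "UDF pattern matches not_entrant_illegal_instruction");
int main() { return 0; }
```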
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 77d3653aefdb8..02e069b6be16d 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -46,7 +46,6 @@ void C1_MacroAssembler::explicit_null_check(Register base) {
 
 void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
-  // Avoid stack bang as first instruction. It may get overwritten by patch_verified_entry.
   const Register return_pc = R20;
   mflr(return_pc);
 
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
index 1114da60d2bb6..ca492329729c0 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
@@ -39,18 +39,6 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-// We use an illtrap for marking a method as not_entrant
-// Work around a C++ compiler bug which changes 'this'
-bool NativeInstruction::is_sigill_not_entrant_at(address addr) {
-  if (!Assembler::is_illtrap(addr)) return false;
-  CodeBlob* cb = CodeCache::find_blob(addr);
-  if (cb == nullptr || !cb->is_nmethod()) return false;
-  nmethod *nm = (nmethod *)cb;
-  // This method is not_entrant iff the illtrap instruction is
-  // located at the verified entry point.
-  return nm->verified_entry_point() == addr;
-}
-
 #ifdef ASSERT
 void NativeInstruction::verify() {
   // Make sure code pattern is actually an instruction address.
@@ -331,25 +319,6 @@ void NativeMovConstReg::verify() {
 }
 #endif // ASSERT
 
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-  ResourceMark rm;
-  int code_size = 1 * BytesPerInstWord;
-  CodeBuffer cb(verified_entry, code_size + 1);
-  MacroAssembler* a = new MacroAssembler(&cb);
-#ifdef COMPILER2
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-#endif
-  // Patch this nmethod atomically. Always use illtrap/trap in debug build.
-  if (DEBUG_ONLY(false &&) a->is_within_range_of_b(dest, a->pc())) {
-    a->b(dest);
-  } else {
-    // The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
-    // We use an illtrap for marking a method as not_entrant.
-    a->illtrap();
-  }
-  ICache::ppc64_flush_icache_bytes(verified_entry, code_size);
-}
-
 #ifdef ASSERT
 void NativeJump::verify() {
   address addr = addr_at(0);
@@ -462,9 +431,7 @@ bool NativeDeoptInstruction::is_deopt_at(address code_pos) {
   if (!Assembler::is_illtrap(code_pos)) return false;
   CodeBlob* cb = CodeCache::find_blob(code_pos);
   if (cb == nullptr || !cb->is_nmethod()) return false;
-  nmethod *nm = (nmethod *)cb;
-  // see NativeInstruction::is_sigill_not_entrant_at()
-  return nm->verified_entry_point() != code_pos;
+  return true;
 }
 
 // Inserts an instruction which is specified to cause a SIGILL at a given pc
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
index f4d570116a8c1..38126ec858dc9 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -69,13 +69,6 @@ class NativeInstruction {
     return MacroAssembler::tdi_get_si16(long_at(0), Assembler::traptoUnconditional, 0);
   }
 
-  // We use an illtrap for marking a method as not_entrant.
-  bool is_sigill_not_entrant() {
-    // Work around a C++ compiler bug which changes 'this'.
-    return NativeInstruction::is_sigill_not_entrant_at(addr_at(0));
-  }
-  static bool is_sigill_not_entrant_at(address addr);
-
 #ifdef COMPILER2
   // SIGTRAP-based implicit range checks
   bool is_sigtrap_range_check() {
@@ -328,15 +321,7 @@ class NativeJump: public NativeInstruction {
     }
   }
 
-  // MT-safe insertion of native jump at verified method entry
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
-
   void verify() NOT_DEBUG_RETURN;
-
-  static void check_verified_entry_alignment(address entry, address verified_entry) {
-    // We just patch one instruction on ppc64, so the jump doesn't have to
-    // be aligned. Nothing to do here.
-  }
 };
 
 // Instantiates a NativeJump object starting at the given instruction
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index d8e00cfef8957..c9e10d71a10a9 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -1686,12 +1686,7 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
   const Register toc_temp        = R23;
   assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp);
 
-  if (method_is_frameless) {
-    // Add nop at beginning of all frameless methods to prevent any
-    // oop instructions from getting overwritten by make_not_entrant
-    // (patching attempt would fail).
-    __ nop();
-  } else {
+  if (!method_is_frameless) {
     // Get return pc.
     __ mflr(return_pc);
   }
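With the not-entrant illtrap gone, the only illtraps left inside a PPC nmethod are deoptimization sites, which is why `is_deopt_at()` can drop the verified-entry comparison. A standalone model of the old and new discrimination — `FakeNMethod` and the addresses are made up for illustration, not HotSpot code:

```cpp
// Hypothetical standalone model (not HotSpot code): how the old PPC logic
// told a not-entrant trap apart from a deoptimization trap by address.
#include <cstdint>
#include <cstdio>

struct FakeNMethod {
  uintptr_t verified_entry;  // address of the verified entry point
};

// Old rule: an illtrap meant "not entrant" iff it sat exactly at the
// verified entry point; any other illtrap inside the nmethod was a deopt.
bool old_is_deopt_at(const FakeNMethod& nm, uintptr_t illtrap_pc) {
  return nm.verified_entry != illtrap_pc;
}

// New rule: make_not_entrant() no longer patches an illtrap at the entry,
// so every illtrap found inside an nmethod must be a deopt site.
bool new_is_deopt_at(const FakeNMethod&, uintptr_t) {
  return true;
}

int main() {
  FakeNMethod nm{0x1000};
  std::printf("old, pc==entry -> deopt? %d\n", old_is_deopt_at(nm, 0x1000)); // 0
  std::printf("old, pc!=entry -> deopt? %d\n", old_is_deopt_at(nm, 0x1040)); // 1
  std::printf("new, any pc    -> deopt? %d\n", new_is_deopt_at(nm, 0x1000)); // 1
}
```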
diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
index 31947b520d056..3834a0d3d3d14 100644
--- a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
+++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
@@ -356,18 +356,6 @@ void NativeMovRegMem::verify() {
 
 void NativeJump::verify() { }
 
-
-void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
-  // Patching to not_entrant can happen while activations of the method are
-  // in use. The patching in that instance must happen only when certain
-  // alignment restrictions are true. These guarantees check those
-  // conditions.
-
-  // Must be 4 bytes aligned
-  MacroAssembler::assert_alignment(verified_entry);
-}
-
-
 address NativeJump::jump_destination() const {
   address dest = MacroAssembler::target_addr_for_insn(instruction_address());
 
@@ -420,12 +408,6 @@ bool NativeInstruction::is_safepoint_poll() {
   return MacroAssembler::is_lwu_to_zr(address(this));
}
 
-// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
-bool NativeInstruction::is_sigill_not_entrant() {
-  // jvmci
-  return uint_at(0) == 0xffffffff;
-}
-
 void NativeIllegalInstruction::insert(address code_pos) {
   assert_cond(code_pos != nullptr);
   Assembler::sd_instr(code_pos, 0xffffffff); // all bits ones is permanently reserved as an illegal instruction
@@ -437,45 +419,6 @@ bool NativeInstruction::is_stop() {
 
 //-------------------------------------------------------------------
 
-// MT-safe inserting of a jump over a jump or a nop (used by
-// nmethod::make_not_entrant)
-
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-
-  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
-         nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
-         "riscv cannot replace non-jump with jump");
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Patch this nmethod atomically.
-  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
-    ptrdiff_t offset = dest - verified_entry;
-    guarantee(Assembler::is_simm21(offset) && ((offset % 2) == 0),
-              "offset is too large to be patched in one jal instruction."); // 1M
-
-    uint32_t insn = 0;
-    address pInsn = (address)&insn;
-    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
-    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
-    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
-    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
-    Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
-    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
-    Assembler::sd_instr(verified_entry, insn);
-  } else {
-    // We use an illegal instruction for marking a method as
-    // not_entrant.
-    NativeIllegalInstruction::insert(verified_entry);
-  }
-
-  ICache::invalidate_range(verified_entry, instruction_size);
-}
-
-//-------------------------------------------------------------------
-
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   CodeBuffer cb(code_pos, instruction_size);
   MacroAssembler a(&cb);
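The removed RISC-V path hand-assembled `jal x0, offset`, scattering the J-type immediate across the instruction word. A standalone sketch of that same bit permutation — `encode_jal_x0` is a hypothetical helper name, not HotSpot API:

```cpp
// Standalone sketch (not HotSpot code) of the J-type immediate scatter the
// removed patch_verified_entry() performed: offset bits [20|10:1|11|19:12]
// go to instruction bits [31|30:21|20|19:12], limiting the reach to +/-1 MB
// (the "1M" comment above).
#include <cstdint>
#include <cstdio>

uint32_t encode_jal_x0(int32_t off) {              // hypothetical helper name
  uint32_t insn = 0b1101111;                       // JAL opcode, rd = x0 (no link)
  insn |= ((uint32_t)(off >> 20) & 0x1)   << 31;   // imm[20]
  insn |= ((uint32_t)(off >> 1)  & 0x3ff) << 21;   // imm[10:1]
  insn |= ((uint32_t)(off >> 11) & 0x1)   << 20;   // imm[11]
  insn |= ((uint32_t)(off >> 12) & 0xff)  << 12;   // imm[19:12]
  return insn;
}

int main() {
  // jump forward 8 bytes: jal x0, 8 -> 0x0080006f
  std::printf("0x%08x\n", encode_jal_x0(8));
}
```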
diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
index d8f5fa57816f3..1598dfb839807 100644
--- a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
+++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -74,7 +74,6 @@ class NativeInstruction {
   bool is_nop() const;
   bool is_jump_or_nop();
   bool is_safepoint_poll();
-  bool is_sigill_not_entrant();
   bool is_stop();
 
 protected:
@@ -274,9 +273,6 @@ class NativeJump: public NativeInstruction {
 
   // Insertion of native jump instruction
   static void insert(address code_pos, address entry);
-
-  // MT-safe insertion of native jump at verified method entry
-  static void check_verified_entry_alignment(address entry, address verified_entry);
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };
 
 inline NativeJump* nativeJump_at(address addr) {
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 0d44acc803f32..634e5fd506394 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1368,14 +1368,6 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
   // n.b. frame size includes space for return pc and fp
   const int framesize = C->output()->frame_size_in_bytes();
 
-  // insert a nop at the start of the prolog so we can patch in a
-  // branch if we need to invalidate the method later
-  {
-    Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
-    MacroAssembler::assert_alignment(__ pc());
-    __ nop(); // 4 bytes
-  }
-
   assert_cond(C != nullptr);
 
   if (C->clinit_barrier_on_entry()) {
@@ -1804,7 +1796,6 @@ void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
   // This is the unverified entry point.
   __ ic_check(CodeEntryAlignment);
 
-  // Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
   // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
   assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 }
@@ -8199,7 +8190,7 @@ instruct unnecessary_membar_volatile_rvtso() %{
   ins_cost(0);
 
   size(0);
-  
+
   format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
   ins_encode %{
     __ block_comment("unnecessary_membar_volatile_rvtso");
diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
index e78906708afd7..2d663061aec9c 100644
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp
@@ -180,7 +180,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
   __ z_lg(Z_R0_scratch, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), Z_thread); // 6 bytes
 
   // Compare to current patched value:
-  __ z_cfi(Z_R0_scratch, /* to be patched */ -1); // 6 bytes (2 + 4 byte imm val)
+  __ z_cfi(Z_R0_scratch, /* to be patched */ 0); // 6 bytes (2 + 4 byte imm val)
 
   // Conditional Jump
   __ z_larl(Z_R14, (Assembler::instr_len((unsigned long)LARL_ZOPC) + Assembler::instr_len((unsigned long)BCR_ZOPC)) / 2); // 6 bytes
diff --git a/src/hotspot/cpu/s390/nativeInst_s390.cpp b/src/hotspot/cpu/s390/nativeInst_s390.cpp
index 9990c225a8986..546f8b133975d 100644
--- a/src/hotspot/cpu/s390/nativeInst_s390.cpp
+++ b/src/hotspot/cpu/s390/nativeInst_s390.cpp
@@ -167,27 +167,6 @@ bool NativeInstruction::is_illegal() {
   return halfword_at(-2) == illegal_instruction();
 }
 
-// We use an illtrap for marking a method as not_entrant.
-bool NativeInstruction::is_sigill_not_entrant() {
-  if (!is_illegal()) return false; // Just a quick path.
-
-  // One-sided error of is_illegal tolerable here
-  // (see implementation of is_illegal() for details).
-
-  CodeBlob* cb = CodeCache::find_blob(addr_at(0));
-  if (cb == nullptr || !cb->is_nmethod()) {
-    return false;
-  }
-
-  nmethod *nm = (nmethod *)cb;
-  // This method is not_entrant if the illtrap instruction
-  // is located at the verified entry point.
-  // BE AWARE: the current pc (this) points to the instruction after the
-  // "illtrap" location.
-  address sig_addr = ((address) this) - 2;
-  return nm->verified_entry_point() == sig_addr;
-}
-
 bool NativeInstruction::is_jump() {
   unsigned long inst;
   Assembler::get_instruction((address)this, &inst);
@@ -620,19 +599,6 @@ void NativeJump::verify() {
   fatal("this is not a `NativeJump' site");
 }
 
-// Patch atomically with an illtrap.
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-  ResourceMark rm;
-  int code_size = 2;
-  CodeBuffer cb(verified_entry, code_size + 1);
-  MacroAssembler* a = new MacroAssembler(&cb);
-#ifdef COMPILER2
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-#endif
-  a->z_illtrap();
-  ICache::invalidate_range(verified_entry, code_size);
-}
-
 #undef LUCY_DBG
 
 //-------------------------------------
diff --git a/src/hotspot/cpu/s390/nativeInst_s390.hpp b/src/hotspot/cpu/s390/nativeInst_s390.hpp
index fcae833769f74..16400df3f2686 100644
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -85,9 +85,6 @@ class NativeInstruction {
   // Bcrl is currently the only accepted instruction here.
   bool is_jump();
 
-  // We use an illtrap for marking a method as not_entrant.
-  bool is_sigill_not_entrant();
-
   bool is_safepoint_poll() {
     // Is the current instruction a POTENTIAL read access to the polling page?
     // The instruction's current arguments are not checked!
@@ -609,11 +606,6 @@ class NativeJump: public NativeInstruction {
 
   // Insertion of native jump instruction.
   static void insert(address code_pos, address entry);
-
-  // MT-safe insertion of native jump at verified method entry.
-  static void check_verified_entry_alignment(address entry, address verified_entry) { }
-
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };
 
 //-------------------------------------
diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp
index d3f6540a3ea83..bb1d9ce603793 100644
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp
@@ -3197,7 +3197,7 @@ class StubGenerator: public StubCodeGenerator {
     // VM-Call: BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr)
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetNMethod::nmethod_stub_entry_barrier));
-    __ z_ltr(Z_R0_scratch, Z_RET);
+    __ z_ltr(Z_RET, Z_RET);
 
     // VM-Call Epilogue
     __ restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 684347e35fa40..cde429b03832b 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -325,16 +325,6 @@ void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
 
 void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
-  if (breakAtEntry) {
-    // Verified Entry first instruction should be 5 bytes long for correct
-    // patching by patch_verified_entry().
-    //
-    // Breakpoint has one byte first instruction.
-    // Also first instruction will be one byte "push(rbp)" if stack banging
-    // code is not generated (see build_frame() above).
-    // For all these cases generate long instruction first.
-    fat_nop();
-  }
   if (breakAtEntry) int3();
   // build frame
 }
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 177be6e59f74a..a0ff36df2ea97 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -50,13 +50,6 @@
 
 // C2 compiled method's prolog code.
 void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub) {
-
-  // WARNING: Initial instruction MUST be 5 bytes or longer so that
-  // NativeJump::patch_verified_entry will be able to patch out the entry
-  // code safely. The push to verify stack depth is ok at 5 bytes,
-  // the frame allocation can be either 3 or 6 bytes. So if we don't do
-  // stack bang then we must use the 6 byte frame allocation even if
-  // we have no frame. :-(
   assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
 
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
@@ -87,8 +80,7 @@ void C2_MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool
       subptr(rsp, framesize);
     }
   } else {
-    // Create frame (force generation of a 4 byte immediate value)
-    subptr_imm32(rsp, framesize);
+    subptr(rsp, framesize);
 
     // Save RBP register now.
     framesize -= wordSize;
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index 803bce4894589..c401863d7cdc8 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1621,19 +1621,6 @@ void MacroAssembler::post_call_nop() {
   emit_int32(0x00);
 }
 
-// A 5 byte nop that is safe for patching (see patch_verified_entry)
-void MacroAssembler::fat_nop() {
-  if (UseAddressNop) {
-    addr_nop_5();
-  } else {
-    emit_int8((uint8_t)0x26); // es:
-    emit_int8((uint8_t)0x2e); // cs:
-    emit_int8((uint8_t)0x64); // fs:
-    emit_int8((uint8_t)0x65); // gs:
-    emit_int8((uint8_t)0x90);
-  }
-}
-
 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) {
   assert(rscratch != noreg || always_reachable(src), "missing");
 
   if (reachable(src)) {
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index f7ac6fb4297ea..d75c9b624fd3a 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -209,8 +209,6 @@ class MacroAssembler: public Assembler {
   void align(uint modulus, uint target);
 
   void post_call_nop();
-  // A 5 byte nop that is safe for patching (see patch_verified_entry)
-  void fat_nop();
 
   // Stack frame creation/removal
   void enter();
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp
index c3345be2172f1..aad1f77e17346 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp
@@ -336,55 +336,6 @@ void NativeJump::insert(address code_pos, address entry) {
   ICache::invalidate_range(code_pos, instruction_size);
 }
 
-void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
-  // Patching to not_entrant can happen while activations of the method are
-  // in use. The patching in that instance must happen only when certain
-  // alignment restrictions are true. These guarantees check those
-  // conditions.
-  const int linesize = 64;
-
-  // Must be wordSize aligned
-  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
-            "illegal address for code patching 2");
-  // First 5 bytes must be within the same cache line - 4827828
-  guarantee((uintptr_t) verified_entry / linesize ==
-            ((uintptr_t) verified_entry + 4) / linesize,
-            "illegal address for code patching 3");
-}
-
-
-// MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::make_not_entrant)
-// The problem: jmp is a 5-byte instruction. Atomic write can be only with 4 bytes.
-// First patches the first word atomically to be a jump to itself.
-// Then patches the last byte and then atomically patches the first word (4-bytes),
-// thus inserting the desired jump
-// This code is mt-safe with the following conditions: entry point is 4 byte aligned,
-// entry point is in same cache line as unverified entry point, and the instruction being
-// patched is >= 5 byte (size of patch).
-//
-// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
-// In C1 the restriction is enforced by CodeEmitter::method_entry
-// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...)
-//
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-  // complete jump instruction (to be inserted) is in code_buffer;
-  union {
-    jlong cb_long;
-    unsigned char code_buffer[8];
-  } u;
-
-  u.cb_long = *(jlong *)verified_entry;
-
-  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
-  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-
-  u.code_buffer[0] = instruction_code;
-  *(int32_t*)(u.code_buffer + 1) = (int32_t)disp;
-
-  Atomic::store((jlong *) verified_entry, u.cb_long);
-  ICache::invalidate_range(verified_entry, 8);
-}
-
 void NativeIllegalInstruction::insert(address code_pos) {
   assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
   *(short *)code_pos = instruction_code;
diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp
index b2448cb99fdb0..3e76700648010 100644
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp
@@ -445,9 +445,6 @@ class NativeJump: public NativeInstruction {
 
   // Insertion of native jump instruction
   static void insert(address code_pos, address entry);
-
-  // MT-safe insertion of native jump at verified method entry
-  static void check_verified_entry_alignment(address entry, address verified_entry);
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };
 
 inline NativeJump* nativeJump_at(address address) {
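The removed x86 mechanism built a 5-byte `jmp rel32` (opcode `0xE9`) in a buffer and published it with a single 8-byte atomic store, which is exactly why `check_verified_entry_alignment()` demanded a word-aligned entry whose first 5 bytes share a cache line. A standalone sketch under those assumptions — `patch_jmp` is a hypothetical helper, and `std::atomic` stands in for the VM's `Atomic::store` plus ICache flush:

```cpp
// Standalone sketch (not HotSpot code) of the removed x86 patching technique.
#include <atomic>
#include <cstdint>
#include <cstring>
#include <cstdio>

void patch_jmp(uint8_t* entry, const uint8_t* dest) {  // hypothetical helper
  uint64_t word;
  std::memcpy(&word, entry, 8);                  // keep trailing bytes 5..7 intact
  uint8_t* bytes = reinterpret_cast<uint8_t*>(&word);
  int32_t disp = (int32_t)(dest - (entry + 5));  // rel32 is relative to end of jmp
  bytes[0] = 0xE9;                               // jmp rel32
  std::memcpy(bytes + 1, &disp, 4);
  // Illustrative only: the VM used Atomic::store on the naturally aligned
  // 8-byte word, followed by ICache::invalidate_range().
  reinterpret_cast<std::atomic<uint64_t>*>(entry)->store(word, std::memory_order_relaxed);
}

int main() {
  alignas(8) uint8_t code[16] = {0};
  patch_jmp(code, code + 16);                    // jmp .+16 -> E9 0B 00 00 00
  std::printf("%02x %02x\n", code[0], code[1]);
}
```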
diff --git a/src/hotspot/cpu/zero/nativeInst_zero.cpp b/src/hotspot/cpu/zero/nativeInst_zero.cpp
deleted file mode 100644
index 0d2747f7fa698..0000000000000
--- a/src/hotspot/cpu/zero/nativeInst_zero.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "asm/assembler.inline.hpp"
-#include "entry_zero.hpp"
-#include "interpreter/zero/zeroInterpreter.hpp"
-#include "nativeInst_zero.hpp"
-#include "runtime/sharedRuntime.hpp"
-
-// This method is called by nmethod::make_not_entrant to
-// insert a jump to SharedRuntime::get_handle_wrong_method_stub()
-// (dest) at the start of a compiled method (verified_entry) to avoid
-// a race where a method is invoked while being made non-entrant.
-
-void NativeJump::patch_verified_entry(address entry,
-                                      address verified_entry,
-                                      address dest) {
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");
-
-  ((ZeroEntry*) verified_entry)->set_entry_point(
-    (address) ZeroInterpreter::normal_entry);
-}
diff --git a/src/hotspot/cpu/zero/nativeInst_zero.hpp b/src/hotspot/cpu/zero/nativeInst_zero.hpp
index fd8f03f1a59bf..399a8e96bc3bf 100644
--- a/src/hotspot/cpu/zero/nativeInst_zero.hpp
+++ b/src/hotspot/cpu/zero/nativeInst_zero.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -177,14 +177,6 @@ class NativeJump : public NativeInstruction {
   void set_jump_destination(address dest) {
     ShouldNotCallThis();
   }
-
-  static void check_verified_entry_alignment(address entry,
-                                             address verified_entry) {
-  }
-
-  static void patch_verified_entry(address entry,
-                                   address verified_entry,
-                                   address dest);
 };
 
 inline NativeJump* nativeJump_at(address address) {
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 9e536d2df97aa..24969683a1fab 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -2752,19 +2752,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
       }
     }
 
-#ifdef _M_ARM64
-    if (in_java &&
-        (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
-         exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
-      if (nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: not_entrant");
-        }
-        return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
-      }
-    }
-#endif
-
     if (in_java) {
       switch (exception_code) {
       case EXCEPTION_INT_DIVIDE_BY_ZERO:
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 3dbb8adddd62e..98b17aaacfcde 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -229,16 +229,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     CodeBlob *cb = nullptr;
     int stop_type = -1;
 
-    // Handle signal from NativeJump::patch_verified_entry().
-    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-      if (TraceTraps) {
-        tty->print_cr("trap: not_entrant");
-      }
-      stub = SharedRuntime::get_handle_wrong_method_stub();
-      goto run_stub;
-    }
-
-    else if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
+    if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
         ((NativeInstruction*)pc)->is_safepoint_poll() &&
         CodeCache::contains((void*) pc) &&
         ((cb = CodeCache::find_blob(pc)) != nullptr) &&
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index ad32ee150e8a3..b7556ca69da79 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -271,14 +271,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // Java thread running in Java code => find exception handler if any
       // a fault inside compiled code, the interpreter, or a stub
 
-      // Handle signal from NativeJump::patch_verified_entry().
-      if ((sig == SIGILL)
-          && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: not_entrant");
-        }
-        stub = SharedRuntime::get_handle_wrong_method_stub();
-      } else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {
+      if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) {
         stub = SharedRuntime::get_poll_stub(pc);
 #if defined(__APPLE__)
       // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions.
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index 6900283418d58..e565f353382e2 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -232,14 +232,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub
 
-      // Handle signal from NativeJump::patch_verified_entry().
-      if ((sig == SIGILL || sig == SIGTRAP)
-          && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
-        }
-        stub = SharedRuntime::get_handle_wrong_method_stub();
-      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
+      if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
         stub = SharedRuntime::get_poll_stub(pc);
       } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
         // BugId 4454115: A read from a MappedByteBuffer can fault
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 6c245f8f1a6e0..28b47877264a7 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -359,11 +359,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
           stub = SharedRuntime::continuation_for_implicit_exception(
                            thread, pc, SharedRuntime::IMPLICIT_NULL);
         }
-      } else if (sig == SIGILL &&
-                 *(int*)pc ==
-                   NativeInstruction::not_entrant_illegal_instruction) {
-        // Not entrant
-        stub = SharedRuntime::get_handle_wrong_method_stub();
       }
     } else if ((thread->thread_state() == _thread_in_vm ||
                 thread->thread_state() == _thread_in_native) &&
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index 5fe37be0d2012..556189471436a 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -258,15 +258,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
     CodeBlob *cb = nullptr;
     int stop_type = -1;
 
-    // Handle signal from NativeJump::patch_verified_entry().
-    if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-      if (TraceTraps) {
-        tty->print_cr("trap: not_entrant");
-      }
-      stub = SharedRuntime::get_handle_wrong_method_stub();
-    }
-
-    else if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
+    if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
             // A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
             // in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
             // especially when we try to read from the safepoint polling page. So the check
diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
index 8366a7249fa4f..7a5929b0f412b 100644
--- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
@@ -223,14 +223,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // Java thread running in Java code => find exception handler if any
       // a fault inside compiled code, the interpreter, or a stub
 
-      // Handle signal from NativeJump::patch_verified_entry().
-      if ((sig == SIGILL || sig == SIGTRAP)
-          && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
-        }
-        stub = SharedRuntime::get_handle_wrong_method_stub();
-      } else if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
+      if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
         stub = SharedRuntime::get_poll_stub(pc);
       } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
         // BugId 4454115: A read from a MappedByteBuffer can fault
diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
index 7e4d72fc0667c..4e074512e34ad 100644
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
@@ -257,15 +257,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       // Java thread running in Java code => find exception handler if any
       // a fault inside compiled code, the interpreter, or a stub
 
-      // Handle signal from NativeJump::patch_verified_entry().
-      if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_not_entrant()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: not_entrant (SIGILL)");
-        }
-        stub = SharedRuntime::get_handle_wrong_method_stub();
-      }
-
-      else if (sig == SIGSEGV &&
+      if (sig == SIGSEGV &&
           SafepointMechanism::is_poll_address((address)info->si_addr)) {
         if (TraceTraps) {
           tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
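All of these SIGILL/SIGTRAP handler branches could be deleted because a not-entrant nmethod no longer traps at all: it is rejected in the entry-barrier slow path and deoptimized to `SharedRuntime::get_handle_wrong_method_stub()` before its body runs. A minimal model of that decision, mirroring the reworked `nmethod_stub_entry_barrier()` below — simplified semantics, not HotSpot code:

```cpp
// Minimal model (assumed semantics, not HotSpot code) of the new entry
// decision: the sticky not_entrant bit wins before the GC barrier runs.
#include <cstdint>
#include <cstdio>

constexpr int32_t kNotEntrant = INT32_MIN;   // sticky bit 31

bool nmethod_entry_barrier(int32_t& guard, int32_t disarmed) {
  guard = disarmed;                          // e.g. heal oops, then disarm
  return true;
}

bool may_enter(int32_t& guard, int32_t disarmed) {
  bool not_entrant = (guard & kNotEntrant) != 0;
  // If may_enter is false the VM deoptimizes to the wrong-method stub.
  return !not_entrant && nmethod_entry_barrier(guard, disarmed);
}

int main() {
  int32_t guard = 0;                         // armed
  std::printf("armed:       may_enter=%d\n", may_enter(guard, 1)); // 1
  guard |= kNotEntrant;                      // make_not_entrant()
  std::printf("not-entrant: may_enter=%d\n", may_enter(guard, 1)); // 0
}
```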
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index acebaae6ba4f8..4e7e091311967 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -28,7 +28,6 @@
 #include "code/dependencies.hpp"
 #include "code/nativeInst.hpp"
 #include "code/nmethod.inline.hpp"
-#include "code/relocInfo.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/abstractCompiler.hpp"
 #include "compiler/compilationLog.hpp"
@@ -692,13 +691,6 @@ address nmethod::oops_reloc_begin() const {
   }
 
   address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
   return low_boundary;
 }
 
@@ -1653,10 +1645,6 @@ void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
 }
 
 void nmethod::print_nmethod(bool printmethod) {
-  // Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
-  // Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
-  // cannot lock in a more finely grained manner.
-  ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
   ttyLocker ttyl;  // keep the following output all in one block
   if (xtty != nullptr) {
     xtty->begin_head("print_nmethod");
@@ -2040,19 +2028,7 @@ bool nmethod::make_not_entrant(ChangeReason change_reason) {
     } else {
       // The caller can be calling the method statically or through an inline
       // cache call.
-      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
-                                       SharedRuntime::get_handle_wrong_method_stub());
-
-      // Update the relocation info for the patched entry.
-      // First, get the old relocation info...
-      RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
-      if (iter.next() && iter.addr() == verified_entry_point()) {
-        Relocation* old_reloc = iter.reloc();
-        // ...then reset the iterator to update it.
-        RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
-        relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
-                                                 relocInfo::relocType::runtime_call_type);
-      }
+      BarrierSet::barrier_set()->barrier_set_nmethod()->make_not_entrant(this);
     }
 
     if (update_recompile_counts()) {
@@ -2942,9 +2918,6 @@ void nmethod::verify() {
   if (is_not_entrant())
     return;
 
-  // Make sure all the entry points are correctly aligned for patching.
-  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
-
   // assert(oopDesc::is_oop(method()), "must be valid");
 
   ResourceMark rm;
diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
index 41eb0a24b625a..522000e0a99e6 100644
--- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp
@@ -72,11 +72,25 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
 }
 
 void BarrierSetNMethod::disarm(nmethod* nm) {
-  set_guard_value(nm, disarmed_guard_value());
+  guard_with(nm, disarmed_guard_value());
+}
+
+void BarrierSetNMethod::guard_with(nmethod* nm, int value) {
+  assert((value & not_entrant) == 0, "not_entrant bit is reserved");
+  // Enter critical section. Does not block for safepoint.
+  ConditionalMutexLocker ml(NMethodEntryBarrier_lock, !NMethodEntryBarrier_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
+  // Do not undo sticky bit
+  if (is_not_entrant(nm)) {
+    value |= not_entrant;
+  }
+  if (guard_value(nm) != value) {
+    // Patch the code only if needed.
+    set_guard_value(nm, value);
+  }
 }
 
 bool BarrierSetNMethod::is_armed(nmethod* nm) {
-  return guard_value(nm) != disarmed_guard_value();
+  return (guard_value(nm) & ~not_entrant) != disarmed_guard_value();
 }
 
 bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
@@ -152,7 +166,7 @@ void BarrierSetNMethod::arm_all_nmethods() {
   // seriously wrong.
   ++_current_phase;
   if (_current_phase == INT_MAX) {
-    _current_phase = 1;
+    _current_phase = initial;
   }
   BarrierSetNMethodArmClosure cl(_current_phase);
   Threads::threads_do(&cl);
@@ -178,23 +192,25 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 
   // Called upon first entry after being armed
-  bool may_enter = bs_nm->nmethod_entry_barrier(nm);
+  bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
   assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
 
-  // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
-  // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
-  // code, where the existence of new instructions is communicated via data (the guard value).
-  // This cross modify fence is only needed when the nmethod entry barrier modifies the
-  // instructions. Not all platforms currently do that, so if this check becomes expensive,
-  // it can be made conditional on the nmethod_patching_type.
-  OrderAccess::cross_modify_fence();
-
-  // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
-  // a very rare event.
-  if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
-    static volatile uint32_t counter=0;
-    if (Atomic::add(&counter, 1u) % 10 == 0) {
-      may_enter = false;
+  if (may_enter) {
+    // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
+    // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
+    // code, where the existence of new instructions is communicated via data (the guard value).
+    // This cross modify fence is only needed when the nmethod entry barrier modifies the
+    // instructions. Not all platforms currently do that, so if this check becomes expensive,
+    // it can be made conditional on the nmethod_patching_type.
+    OrderAccess::cross_modify_fence();
+
+    // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
+    // a very rare event.
+    if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
+      static volatile uint32_t counter=0;
+      if (Atomic::add(&counter, 1u) % 10 == 0) {
+        may_enter = false;
+      }
     }
   }
 
@@ -220,3 +236,22 @@ oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
 oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
 }
+
+// Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
+// deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
+// A sticky armed bit is set and other bits are preserved. As a result, a call to
+// nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
+// false and nmethod_entry_barrier() is not called.
+void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
+  // Enter critical section. Does not block for safepoint.
+  ConditionalMutexLocker ml(NMethodEntryBarrier_lock, !NMethodEntryBarrier_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
+  int value = guard_value(nm) | not_entrant;
+  if (guard_value(nm) != value) {
+    // Patch the code only if needed.
+    set_guard_value(nm, value);
+  }
+}
+
+bool BarrierSetNMethod::is_not_entrant(nmethod* nm) {
+  return (guard_value(nm) & not_entrant) != 0;
+}
diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.hpp b/src/hotspot/share/gc/shared/barrierSetNMethod.hpp
index 756dc43b3f1da..b905e8869b576 100644
--- a/src/hotspot/share/gc/shared/barrierSetNMethod.hpp
+++ b/src/hotspot/share/gc/shared/barrierSetNMethod.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,10 +36,20 @@ class nmethod;
 class BarrierSetNMethod: public CHeapObj<mtGC> {
 private:
   int _current_phase;
+  enum {
+    not_entrant = 1 << 31, // armed sticky bit, see make_not_entrant
+    armed = 0,
+    initial = 1,
+  };
+
   void deoptimize(nmethod* nm, address* return_addr_ptr);
 
+protected:
+  virtual int guard_value(nmethod* nm);
+  void set_guard_value(nmethod* nm, int value);
+
 public:
-  BarrierSetNMethod() : _current_phase(1) {}
+  BarrierSetNMethod() : _current_phase(initial) {}
   bool supports_entry_barrier(nmethod* nm);
 
   virtual bool nmethod_entry_barrier(nmethod* nm);
@@ -50,13 +60,15 @@ class BarrierSetNMethod: public CHeapObj<mtGC> {
   static int nmethod_stub_entry_barrier(address* return_address_ptr);
   bool nmethod_osr_entry_barrier(nmethod* nm);
 
-  bool is_armed(nmethod* nm);
+  virtual bool is_armed(nmethod* nm);
+  void arm(nmethod* nm) { guard_with(nm, armed); }
   void disarm(nmethod* nm);
+  virtual void make_not_entrant(nmethod* nm);
+  virtual bool is_not_entrant(nmethod* nm);
 
-  int guard_value(nmethod* nm);
-  void set_guard_value(nmethod* nm, int value);
+  virtual void guard_with(nmethod* nm, int value);
 
-  void arm_all_nmethods();
+  virtual void arm_all_nmethods();
 
   virtual oop oop_load_no_keepalive(const nmethod* nm, int index);
   virtual oop oop_load_phantom(const nmethod* nm, int index);
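A minimal standalone model of the guard protocol added above, assuming the simplified semantics sketched here (`GuardModel` is illustrative, not HotSpot code): `guard_with()` preserves the sticky `not_entrant` bit and `is_armed()` masks it off, so disarming a not-entrant nmethod can never revive it.

```cpp
// Standalone model (assumed semantics, not HotSpot code) of the guard word.
#include <cstdint>
#include <cassert>

constexpr int32_t kNotEntrant = INT32_MIN;       // 1 << 31, reserved

struct GuardModel {
  int32_t guard    = 1;                          // "initial"
  int32_t disarmed = 2;                          // example epoch value

  bool is_not_entrant() const { return (guard & kNotEntrant) != 0; }
  bool is_armed() const { return (guard & ~kNotEntrant) != disarmed; }

  void guard_with(int32_t value) {
    assert((value & kNotEntrant) == 0 && "not_entrant bit is reserved");
    if (is_not_entrant()) value |= kNotEntrant;  // do not undo sticky bit
    guard = value;
  }
  void make_not_entrant() { guard |= kNotEntrant; }
};

int main() {
  GuardModel g;
  g.make_not_entrant();
  g.guard_with(g.disarmed);   // attempt to disarm
  assert(g.is_not_entrant()); // sticky bit survived
  assert(!g.is_armed());      // software view says "disarmed", yet the raw
  // compare (guard != disarmed) still routes entry into the stub, which then
  // rejects it via is_not_entrant() -- the "spurious" call described above.
}
```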
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
index 49d2df0cc936c..0d9077be2265b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp
@@ -150,7 +150,7 @@ class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
       ShenandoahNMethod::heal_nmethod_metadata(nm_data);
       // Code cache unloading needs to know about on-stack nmethods. Arm the nmethods to get
       // mark_as_maybe_on_stack() callbacks when they are used again.
-      _bs->set_guard_value(nm, 0);
+      _bs->arm(nm);
     }
   }
diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
index 0d6be2b789f77..392d194a65bee 100644
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
@@ -105,3 +105,36 @@ oop ZBarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
 oop ZBarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
   return ZNMethod::oop_load_phantom(nm, index);
 }
+
+void ZBarrierSetNMethod::guard_with(nmethod* nm, int value) {
+  assert((value & not_entrant) == 0, "not_entrant bit is reserved");
+  ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+  // Preserve the sticky bit
+  if (is_not_entrant(nm)) {
+    value |= not_entrant;
+  }
+  if (guard_value(nm) != value) {
+    // Patch the code only if needed.
+    set_guard_value(nm, value);
+  }
+}
+
+bool ZBarrierSetNMethod::is_armed(nmethod* nm) {
+  int value = guard_value(nm) & ~not_entrant;
+  return value != disarmed_guard_value();
+}
+
+void ZBarrierSetNMethod::make_not_entrant(nmethod* nm) {
+  ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+  int value = guard_value(nm) | not_entrant; // permanent sticky value
+  set_guard_value(nm, value);
+}
+
+bool ZBarrierSetNMethod::is_not_entrant(nmethod* nm) {
+  return (guard_value(nm) & not_entrant) != 0;
+}
+
+uintptr_t ZBarrierSetNMethod::color(nmethod* nm) {
+  // color is stored at low order bits of int; conversion to uintptr_t is fine
+  return uintptr_t(guard_value(nm) & ~not_entrant);
+}
diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.hpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.hpp
index 780d3772123e3..e0b7ba6c773aa 100644
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.hpp
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.hpp
@@ -30,15 +30,27 @@ class nmethod;
 
 class ZBarrierSetNMethod : public BarrierSetNMethod {
+  enum : int {
+    not_entrant = 1 << 31, // armed sticky bit, see make_not_entrant
+  };
+
 protected:
   virtual bool nmethod_entry_barrier(nmethod* nm);
 
 public:
+  uintptr_t color(nmethod* nm);
+
   virtual ByteSize thread_disarmed_guard_value_offset() const;
   virtual int* disarmed_guard_value_address() const;
 
   virtual oop oop_load_no_keepalive(const nmethod* nm, int index);
   virtual oop oop_load_phantom(const nmethod* nm, int index);
+
+  virtual void make_not_entrant(nmethod* nm);
+  virtual bool is_not_entrant(nmethod* nm);
+  virtual void guard_with(nmethod* nm, int value);
+  virtual bool is_armed(nmethod* nm);
+  virtual void arm_all_nmethods() { ShouldNotCallThis(); }
 };
 
 #endif // SHARE_GC_Z_ZBARRIERSETNMETHOD_HPP
diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp
index 9846f1244ecea..cc153bc925ad3 100644
--- a/src/hotspot/share/gc/z/zMark.cpp
+++ b/src/hotspot/share/gc/z/zMark.cpp
@@ -769,7 +769,7 @@ class ZMarkYoungNMethodClosure : public NMethodClosure {
       ZNMethod::nmethod_patch_barriers(nm);
     }
 
-    _bs_nm->set_guard_value(nm, (int)untype(new_disarm_value_ptr));
+    _bs_nm->guard_with(nm, (int)untype(new_disarm_value_ptr));
 
     if (complete_disarm) {
       log_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by young (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, untype(new_disarm_value_ptr));
diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp
index bf592c20fa296..3f65d2eea973e 100644
--- a/src/hotspot/share/gc/z/zNMethod.cpp
+++ b/src/hotspot/share/gc/z/zNMethod.cpp
@@ -241,7 +241,7 @@ void ZNMethod::disarm(nmethod* nm) {
 
 void ZNMethod::set_guard_value(nmethod* nm, int value) {
   BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  bs->set_guard_value(nm, value);
+  bs->guard_with(nm, value);
 }
 
 void ZNMethod::nmethod_patch_barriers(nmethod* nm) {
@@ -300,9 +300,8 @@ void ZNMethod::nmethods_do(bool secondary, NMethodClosure* cl) {
 }
 
 uintptr_t ZNMethod::color(nmethod* nm) {
-  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
-  // color is stored at low order bits of int; conversion to uintptr_t is fine
-  return (uintptr_t)bs_nm->guard_value(nm);
+  ZBarrierSetNMethod* bs_nm = static_cast<ZBarrierSetNMethod*>(BarrierSet::barrier_set()->barrier_set_nmethod());
+  return bs_nm->color(nm);
 }
 
 oop ZNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
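ZGC keeps GC "color" bits in the low-order bits of the same guard int, so `color()` must strip the reserved bit before widening; a tiny standalone check of that masking (assumed semantics, not ZGC code):

```cpp
// Standalone sketch (not HotSpot/ZGC code): recover the color bits from a
// guard value regardless of the sticky not_entrant bit.
#include <cstdint>
#include <cstdio>

constexpr int32_t kNotEntrant = INT32_MIN;

uintptr_t color(int32_t guard) {
  return (uintptr_t)(uint32_t)(guard & ~kNotEntrant);
}

int main() {
  int32_t guard = 0x2a;                                  // example color bits
  std::printf("color=0x%zx\n", (size_t)color(guard));                // 0x2a
  std::printf("color=0x%zx\n", (size_t)color(guard | kNotEntrant)); // still 0x2a
}
```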
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index 36e0cfe112581..b90f8c23a6dca 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -36,6 +36,7 @@
 
 // Mutexes used in the VM (see comment in mutexLocker.hpp):
 Mutex*   NMethodState_lock = nullptr;
+Mutex*   NMethodEntryBarrier_lock = nullptr;
 Monitor* SystemDictionary_lock = nullptr;
 Mutex*   InvokeMethodTypeTable_lock = nullptr;
 Monitor* InvokeMethodIntrinsicTable_lock = nullptr;
@@ -205,6 +206,8 @@ void assert_lock_strong(const Mutex* lock) {
 void mutex_init() {
   MUTEX_DEFN(tty_lock                        , PaddedMutex  , tty);            // allow to lock in VM
 
+  MUTEX_DEFN(NMethodEntryBarrier_lock        , PaddedMutex  , service-1);
+
   MUTEX_DEFN(STS_lock                        , PaddedMonitor, nosafepoint);
 
   if (UseG1GC) {
diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
index 6baa93b257933..640747c3fe91c 100644
--- a/src/hotspot/share/runtime/mutexLocker.hpp
+++ b/src/hotspot/share/runtime/mutexLocker.hpp
@@ -34,6 +34,7 @@ class Thread;
 // Mutexes used in the VM.
 
 extern Mutex*   NMethodState_lock;                 // a lock used to guard a compiled method state
+extern Mutex*   NMethodEntryBarrier_lock;          // protects nmethod entry barrier
 extern Monitor* SystemDictionary_lock;             // a lock on the system dictionary
 extern Mutex*   InvokeMethodTypeTable_lock;
 extern Monitor* InvokeMethodIntrinsicTable_lock;
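The new `NMethodEntryBarrier_lock` is always taken through `ConditionalMutexLocker` with a `!owned_by_self()` condition, so a thread that already holds it can update guard values without self-deadlock. A standalone model of that pattern — `OwnedMutex` and `ConditionalLocker` are illustrative stand-ins, not HotSpot classes:

```cpp
// Standalone model (not HotSpot code) of the ConditionalMutexLocker pattern.
#include <mutex>
#include <thread>

class OwnedMutex {
  std::mutex _m;
  std::thread::id _owner{};
public:
  bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
  void lock()   { _m.lock(); _owner = std::this_thread::get_id(); }
  void unlock() { _owner = {}; _m.unlock(); }
};

class ConditionalLocker {          // cf. HotSpot's ConditionalMutexLocker
  OwnedMutex& _m;
  const bool _locked;
public:
  ConditionalLocker(OwnedMutex& m, bool condition) : _m(m), _locked(condition) {
    if (_locked) _m.lock();
  }
  ~ConditionalLocker() { if (_locked) _m.unlock(); }
};

OwnedMutex barrier_lock;

void guard_with() {                // hypothetical guard update
  ConditionalLocker ml(barrier_lock, !barrier_lock.owned_by_self());
  // ... read-modify-write of the guard value happens under the lock ...
}

int main() {
  ConditionalLocker outer(barrier_lock, !barrier_lock.owned_by_self());
  guard_with();                    // nested call skips re-locking: no deadlock
}
```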