10 changes: 10 additions & 0 deletions src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -1656,6 +1656,11 @@ void LIRGenerator::do_StoreField(StoreField* x) {
assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
#endif

if (!field->is_null_free() && !vk->nullable_atomic_layout_is_natural()) {
bailout("missing support for unnatural nullable atomic layout");
return;
}

// Zero the payload
BasicType bt = vk->atomic_size_to_basic_type(field->is_null_free());
LIR_Opr payload = new_register((bt == T_LONG) ? bt : T_INT);
@@ -2103,6 +2108,11 @@ void LIRGenerator::do_LoadField(LoadField* x) {
assert(x->state_before() != nullptr, "Needs state before");
#endif

if (!field->is_null_free() && !vk->nullable_atomic_layout_is_natural()) {
bailout("missing support for unnatural nullable atomic layout");
return;
}

// Allocate buffer (we can't easily do this conditionally on the null check below
// because branches added in the LIR are opaque to the register allocator).
NewInstance* buffer = new NewInstance(vk, x->state_before(), false, true);
6 changes: 6 additions & 0 deletions src/hotspot/share/ci/ciInlineKlass.cpp
@@ -106,12 +106,18 @@ int ciInlineKlass::null_marker_offset_in_payload() const {
GUARDED_VM_ENTRY(return get_InlineKlass()->null_marker_offset_in_payload();)
}

bool ciInlineKlass::nullable_atomic_layout_is_natural() const {
assert(has_nullable_atomic_layout(), "must have the layout to query its nature");
GUARDED_VM_ENTRY(return get_InlineKlass()->nullable_atomic_layout_is_natural();)
}

// Convert size of atomic layout in bytes to corresponding BasicType
BasicType ciInlineKlass::atomic_size_to_basic_type(bool null_free) const {
VM_ENTRY_MARK
InlineKlass* vk = get_InlineKlass();
assert(!null_free || vk->has_atomic_layout(), "No null-free atomic layout available");
assert( null_free || vk->has_nullable_atomic_layout(), "No nullable atomic layout available");
assert( null_free || nullable_atomic_layout_is_natural(), "Cannot access the nullable atomic layout naturally");
int size = null_free ? vk->atomic_size_in_bytes() : vk->nullable_atomic_size_in_bytes();
BasicType bt = T_ILLEGAL;
if (size == sizeof(jlong)) {
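The hunk above is cut off before the remaining size checks. As a rough illustration of the size-to-BasicType idea, here is a hypothetical standalone sketch (not the elided code; the smaller cases are assumed by analogy with the jlong case visible above):

#include "utilities/globalDefinitions.hpp" // BasicType, T_*, jlong, jint, ...

// Hypothetical illustration only: map the byte size of an atomic payload to
// the integral BasicType wide enough to access it with one memory instruction.
static BasicType size_to_basic_type_sketch(int size) {
  if (size == sizeof(jlong))  return T_LONG;  // the case shown in the hunk
  if (size == sizeof(jint))   return T_INT;   // assumed by analogy
  if (size == sizeof(jshort)) return T_SHORT; // assumed by analogy
  if (size == sizeof(jbyte))  return T_BYTE;  // assumed by analogy
  return T_ILLEGAL;                           // no single instruction covers it
}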
4 changes: 4 additions & 0 deletions src/hotspot/share/ci/ciInlineKlass.hpp
@@ -75,6 +75,10 @@ class ciInlineKlass : public ciInstanceKlass {
bool has_atomic_layout() const;
bool has_nullable_atomic_layout() const;
int null_marker_offset_in_payload() const;

// Whether we can access a nullable atomic field of this type using a single memory instruction.
// Otherwise, we must access the payload and the null marker parts separately. See InlineKlass.
bool nullable_atomic_layout_is_natural() const;
BasicType atomic_size_to_basic_type(bool null_free) const;

bool must_be_atomic() const;
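To make the new predicate concrete: a nullable atomic layout is "natural" when its total size (payload plus null marker, as rounded by the layout builder) is a power of two that one memory instruction can cover. A minimal sketch, with assumed example sizes:

#include <cstdio>

// Mirrors the is_power_of_2 test behind nullable_atomic_layout_is_natural();
// the sizes below are assumed examples, not taken from the patch.
static bool is_natural(int nullable_atomic_size_in_bytes) {
  int s = nullable_atomic_size_in_bytes;
  return s > 0 && (s & (s - 1)) == 0;
}

int main() {
  printf("%d\n", is_natural(4)); // e.g. 2-byte payload + 1-byte marker rounded to 4
  printf("%d\n", is_natural(8)); // e.g. 4-byte payload + 1-byte marker rounded to 8
  printf("%d\n", is_natural(9)); // 8-byte atomic payload + 1-byte marker: unnatural
  return 0;
}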
30 changes: 17 additions & 13 deletions src/hotspot/share/classfile/fieldLayoutBuilder.cpp
@@ -91,7 +91,7 @@ static void get_size_and_alignment(InlineKlass* vk, LayoutKind kind, int* size, int* alignment) {
break;
case LayoutKind::NULLABLE_ATOMIC_FLAT:
*size = vk->nullable_atomic_size_in_bytes();
*alignment = *size;
*alignment = vk->payload_alignment();
break;
default:
ShouldNotReachHere();
@@ -1144,11 +1144,13 @@ void FieldLayoutBuilder::compute_inline_class_layout() {
_non_atomic_layout_alignment = _payload_alignment;
}

int required_alignment = _payload_alignment;
// Next step is to compute the characteristics for a layout enabling atomic updates
if (UseAtomicValueFlattening) {
int atomic_size = _payload_size_in_bytes == 0 ? 0 : round_up_power_of_2(_payload_size_in_bytes);
if (atomic_size <= (int)MAX_ATOMIC_OP_SIZE) {
_atomic_layout_size_in_bytes = atomic_size;
required_alignment = MAX2(required_alignment, atomic_size);
}
}

@@ -1191,13 +1193,21 @@ void FieldLayoutBuilder::compute_inline_class_layout() {
if (nullable_size <= (int)MAX_ATOMIC_OP_SIZE) {
_nullable_layout_size_in_bytes = nullable_size;
_null_marker_offset = null_marker_offset;
required_alignment = MAX2(required_alignment, nullable_size);
} else {
// If the nullable layout is rejected, the NULL_MARKER block should be removed
// from the layout, otherwise it will appear anyway if the layout is printed
if (!_is_empty_inline_class) { // empty values don't have a dedicated NULL_MARKER block
_layout->remove_null_marker();
if (_atomic_layout_size_in_bytes > 0 && _nonstatic_oopmap_count == 0) {
// Don't do this if the payload contains oops: storing null ignores the payload, which may
// result in the objects referenced there being unnecessarily kept alive by the GC (i.e. a memory leak)
_nullable_layout_size_in_bytes = _atomic_layout_size_in_bytes + 1;
_null_marker_offset = null_marker_offset;
} else {
// If the nullable layout is rejected, the NULL_MARKER block should be removed
// from the layout, otherwise it will appear anyway if the layout is printed
if (!_is_empty_inline_class) { // empty values don't have a dedicated NULL_MARKER block
_layout->remove_null_marker();
}
_null_marker_offset = -1;
}
_null_marker_offset = -1;
}
}
// If the inline class has an atomic or nullable (which is also atomic) layout,
@@ -1207,13 +1217,7 @@ void FieldLayoutBuilder::compute_inline_class_layout() {
// doesn't have inherited fields (offsets of inherited fields cannot be changed). If a
// field shift is needed but not possible, all atomic layouts are disabled and only reference
// and loosely consistent are supported.
int required_alignment = _payload_alignment;
if (has_atomic_layout() && _payload_alignment < atomic_layout_size_in_bytes()) {
required_alignment = atomic_layout_size_in_bytes();
}
if (has_nullable_atomic_layout() && _payload_alignment < nullable_layout_size_in_bytes()) {
required_alignment = nullable_layout_size_in_bytes();
}
assert(is_power_of_2(required_alignment), "%s does not have a valid alignment: %d", _classname->as_utf8(), required_alignment);
int shift = first_field->offset() % required_alignment;
if (shift != 0) {
if (required_alignment > _payload_alignment && !_layout->has_inherited_fields()) {
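The alignment bookkeeping above can be checked with a small worked example (all numbers assumed for illustration): required_alignment starts at the payload alignment and is raised to each accepted atomic layout size, and a payload whose first field is not aligned to it must be shifted, which is only possible without inherited fields:

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed example: 6-byte payload with natural alignment 2.
  int payload_alignment  = 2;
  int atomic_size        = 8; // round_up_power_of_2(6)
  int required_alignment = std::max(payload_alignment, atomic_size);

  // Assumed first-field offset within the container.
  int first_field_offset = 12;
  int shift = first_field_offset % required_alignment; // 12 % 8 == 4

  // A non-zero shift means the fields must move; if inherited fields forbid
  // that, the atomic layouts are abandoned, as the hunk above explains.
  printf("required_alignment=%d shift=%d\n", required_alignment, shift);
  return 0;
}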
28 changes: 22 additions & 6 deletions src/hotspot/share/oops/inlineKlass.cpp
@@ -40,17 +40,20 @@
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/layoutKind.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/refArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/stringUtils.hpp"

// Constructor
@@ -188,24 +191,37 @@ void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
switch(lk) {
case LayoutKind::NULLABLE_ATOMIC_FLAT: {
if (is_payload_marked_as_null((address)src)) {
if (is_payload_marked_as_null((address)src)) {
if (!contains_oops()) {
mark_payload_as_null((address)dst);
return;
}
// copy null_reset value to dest
assert(nullable_atomic_layout_is_natural(), "classes with unnatural nullable layout should not contain oops");
if (dest_is_initialized) {
HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
} else {
HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
}
} else {
// Copy has to be performed, even if this is an empty value, because of the null marker
mark_payload_as_non_null((address)src);
if (dest_is_initialized) {
HeapAccess<>::value_copy(src, dst, this, lk);
if (!nullable_atomic_layout_is_natural()) {
// Copy the payload and the null marker separately
OrderAccess::acquire(); // Acquire between loading the null marker and the payload in src
if (dest_is_initialized) {
HeapAccess<>::value_copy(src, dst, this, LayoutKind::ATOMIC_FLAT);
} else {
HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, LayoutKind::ATOMIC_FLAT);
}
OrderAccess::release(); // Release between storing the payload and the null marker in dst
mark_payload_as_non_null((address)dst);
} else {
HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
// Copy has to be performed, even if this is an empty value, because of the null marker
mark_payload_as_non_null((address)src);
if (dest_is_initialized) {
HeapAccess<>::value_copy(src, dst, this, lk);
} else {
HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
}
}
}
}
56 changes: 56 additions & 0 deletions src/hotspot/share/oops/inlineKlass.hpp
@@ -31,11 +31,62 @@
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
#include "runtime/registerMap.hpp"
#include "utilities/powerOfTwo.hpp"

// An InlineKlass is a specialized InstanceKlass for concrete value classes
// (abstract value classes are represented by InstanceKlass)

/**
There are 2 ways to access a nullable atomic field or array element. If the payload including the
null marker fits into a jlong, then we can just access the element as a whole. Otherwise, we can
try another strategy: since the payload is only relevant when the null marker is 1, we can make
the field behave as if it were accessed atomically even though the access consists of 2 native
memory accesses.

A store of a non-null Long into a nullable Long field can be executed as:

store field.value;
release_fence;
store field.null_marker;

and the store of a null into that field will be:

store field.null_marker;

A load, in turn, can be executed as:

load field.null_marker;
acquire_fence;
load field.value;

What we need to prove is that, given n concurrent stores:

1. The final state of the memory must be one of the executed stores:
Consider the stores into the null marker:
- If the last state of the null marker is 0, then the field is null
- If the last state of the null marker is 1, then the field is non-null. In this case, only the
  threads that store non-null Long objects touch the memory of the value, and one of those
  stores leaves the last state of that memory. The result is as if a single non-null store
  executed last.

Note that the fences are irrelevant for these conditions.

2. A concurrent load must observe either the initial state or one of the concurrently
   executing stores:

- If it observes the null marker being 0, then it observes the field being null. In this case,
  only the null marker is relevant, and the access is trivially atomic.
- If it observes the null marker being 1, then it observes the field being non-null. In this
  case, if the initial state is null, we must have observed the null marker stored by one of
  the threads, and since we have fences, we must at least observe the value stored by that
  thread (or by another thread; the point is that we cannot observe the value in its initial
  state). Otherwise, the initial state is non-null, and we must observe either the initial
  value or one of the values stored by the threads storing non-null.

As a result, we can see that in any case, the field accesses act as if they were atomic.

Note that a store of null to a flattened field ignores the payload, so we avoid this kind of
flattening if the class has oop fields: stale oops left in the ignored payload could keep their
referents alive unnecessarily.
*/
class InlineKlass: public InstanceKlass {
friend class VMStructs;
friend class InstanceKlass;
@@ -157,6 +208,11 @@ class InlineKlass: public InstanceKlass {
int null_marker_offset_in_payload() const { return null_marker_offset() - payload_offset(); }
void set_null_marker_offset(int offset) { *(int*)adr_null_marker_offset() = offset; }

bool nullable_atomic_layout_is_natural() const {
assert(has_nullable_atomic_layout(), "must have the layout the query its nature");
return is_power_of_2(nullable_atomic_size_in_bytes());
}

bool is_payload_marked_as_null(address payload) {
assert(has_nullable_atomic_layout(), " Must have");
return *((jbyte*)payload + null_marker_offset_in_payload()) == 0;
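A minimal portable sketch of the two-access protocol described in the block comment above, using release/acquire operations on the null marker where HotSpot uses OrderAccess fences (the struct layout and all names here are assumptions for illustration, not the VM's actual representation):

#include <atomic>
#include <cstdint>

// An "unnatural" nullable slot: 8-byte payload plus a separate 1-byte marker,
// 9 bytes in total, so no single instruction can cover the whole thing.
struct NullableSlot {
  std::atomic<int64_t> payload{0};
  std::atomic<uint8_t> null_marker{0}; // 0 = null, 1 = non-null
};

// Store of a non-null value: payload first, then publish the marker.
// The release store orders the payload write before the marker write.
void store_non_null(NullableSlot& f, int64_t v) {
  f.payload.store(v, std::memory_order_relaxed);
  f.null_marker.store(1, std::memory_order_release);
}

// Store of null touches only the marker; the stale payload is ignored.
void store_null(NullableSlot& f) {
  f.null_marker.store(0, std::memory_order_relaxed);
}

// Load: marker first, then the payload only if non-null. The acquire load
// guarantees we observe at least the payload written by the marker's writer.
bool load(NullableSlot& f, int64_t& out) {
  if (f.null_marker.load(std::memory_order_acquire) == 0) {
    return false; // observed null; the payload is meaningless
  }
  out = f.payload.load(std::memory_order_relaxed);
  return true;
}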