Skip to content

Commit

Permalink
[vm] Assign top-level classes cids outside 16-bit range
Browse files Browse the repository at this point in the history
Right now we assign class ids to top-level classes, abstract classes as
well as concrete classes. All of them are allocated from a 16-bit pool
of ids. The VM FATAL()s once it hits that limit.

Customers who run very large programs (significant amount of generated
code) on the Dart VM have started to hit this 16-bit class limit.

Concrete classes can have instances in the heap. Our current heap layout
only allows 16-bit class ids to be encoded in the header word. To avoid
increasing the size of heap objects or shrinking the identity hash code
to 16 bits, we keep class ids in object headers at 16 bits.

Abstract classes cannot have instances in the heap. However, their class
ids are encoded in type objects. Furthermore, we sort classes in
AOT/AppJIT mode to perform fast class-id range checks. To avoid impacting
this optimization we treat abstract classes the same way as concrete
classes.

Top-level classes cannot have instances in the heap. Their class ids are
only used in the runtime code, for example for hot-reload as well as
part of the service protocol.

=> We can allocate class ids outside the 16-bit range for top-level
classes, thereby freeing a significant amount of space in the 16-bit
range.

This CL does exactly that: We change classid_t to be int32_t. The
ClassLayout::id_ can now be assigned ids outside 16-bit range for
top-level classes. To do this we keep dart classes and top level classes
as separate arrays in the ClassTable.

Issue #42533

See also b/160229360

Change-Id: I6710a644e7b0ab2d4f4c792bef8e1f91cb117421
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/153607
Commit-Queue: Martin Kustermann <[email protected]>
Reviewed-by: Ryan Macnak <[email protected]>
  • Loading branch information
mkustermann authored and [email protected] committed Jul 9, 2020
1 parent 4f66bb0 commit d215ab6
Show file tree
Hide file tree
Showing 24 changed files with 470 additions and 127 deletions.
6 changes: 4 additions & 2 deletions runtime/platform/globals.h
Original file line number Diff line number Diff line change
Expand Up @@ -479,8 +479,10 @@ const int64_t kSignBitDouble = DART_INT64_C(0x8000000000000000);
typedef intptr_t word;
typedef uintptr_t uword;

// Size of a class id.
typedef uint16_t classid_t;
// Type of a class id, assigned to concrete, abstract and top-level classes.
//
// We use a signed integer type here to make it comparable with intptr_t.
typedef int32_t classid_t;

// Byte sizes.
const int kWordSize = sizeof(word);
Expand Down
9 changes: 7 additions & 2 deletions runtime/vm/class_finalizer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1170,7 +1170,7 @@ void ClassFinalizer::FinalizeClass(const Class& cls) {
RemoveCHAOptimizedCode(cls, cids);
}

if (FLAG_use_cha_deopt) {
if (FLAG_use_cha_deopt && !cls.IsTopLevel()) {
Zone* zone = thread->zone();
ClassTable* class_table = thread->isolate()->class_table();
auto& interface_class = Class::Handle(zone);
Expand Down Expand Up @@ -1482,7 +1482,12 @@ class CidRewriteVisitor : public ObjectVisitor {
void VisitObject(ObjectPtr obj) {
if (obj->IsClass()) {
ClassPtr cls = Class::RawCast(obj);
cls->ptr()->id_ = Map(cls->ptr()->id_);
const classid_t old_cid = cls->ptr()->id_;
if (ClassTable::IsTopLevelCid(old_cid)) {
// We don't remap cids of top level classes.
return;
}
cls->ptr()->id_ = Map(old_cid);
} else if (obj->IsField()) {
FieldPtr field = Field::RawCast(obj);
field->ptr()->guarded_cid_ = Map(field->ptr()->guarded_cid_);
Expand Down
117 changes: 103 additions & 14 deletions runtime/vm/class_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

#include "vm/class_table.h"

#include <limits>
#include <memory>

#include "platform/atomic.h"
Expand Down Expand Up @@ -81,7 +82,10 @@ void ClassTable::set_table(ClassPtr* table) {
ClassTable::ClassTable(SharedClassTable* shared_class_table)
: top_(kNumPredefinedCids),
capacity_(0),
table_(NULL),
tlc_top_(0),
tlc_capacity_(0),
table_(nullptr),
tlc_table_(nullptr),
old_class_tables_(new MallocGrowableArray<ClassPtr*>()),
shared_class_table_(shared_class_table) {
if (Dart::vm_isolate() == NULL) {
Expand Down Expand Up @@ -122,6 +126,7 @@ ClassTable::~ClassTable() {
delete old_class_tables_;
}
free(table_.load());
free(tlc_table_.load());
}

void ClassTable::AddOldTable(ClassPtr* old_class_table) {
Expand Down Expand Up @@ -149,7 +154,8 @@ void SharedClassTable::FreeOldTables() {
void ClassTable::Register(const Class& cls) {
ASSERT(Thread::Current()->IsMutatorThread());

const intptr_t index = cls.id();
const classid_t cid = cls.id();
ASSERT(!IsTopLevelCid(cid));

// During the transition period we would like [SharedClassTable] to operate in
// parallel to [ClassTable].
Expand All @@ -158,12 +164,12 @@ void ClassTable::Register(const Class& cls) {
cls.is_abstract() ? 0 : Class::host_instance_size(cls.raw());

const intptr_t expected_cid =
shared_class_table_->Register(index, instance_size);
shared_class_table_->Register(cid, instance_size);

if (index != kIllegalCid) {
ASSERT(index > 0 && index < kNumPredefinedCids && index < top_);
ASSERT(table_.load()[index] == nullptr);
table_.load()[index] = cls.raw();
if (cid != kIllegalCid) {
ASSERT(cid > 0 && cid < kNumPredefinedCids && cid < top_);
ASSERT(table_.load()[cid] == nullptr);
table_.load()[cid] = cls.raw();
} else {
if (top_ == capacity_) {
const intptr_t new_capacity = capacity_ + kCapacityIncrement;
Expand All @@ -177,6 +183,28 @@ void ClassTable::Register(const Class& cls) {
ASSERT(expected_cid == cls.id());
}

// Registers a top-level class, assigning it a cid outside the 16-bit range
// used for concrete/abstract classes. Top-level classes are kept in a
// separate table ([tlc_table_]) so they do not consume 16-bit cids.
void ClassTable::RegisterTopLevel(const Class& cls) {
  // Guard the counter that actually produces the new cid: top-level cids are
  // derived from [tlc_top_] (via CidFromTopLevelIndex), so that is the value
  // which must stay representable in classid_t. (The previous check inspected
  // [top_], the regular class-table counter, which does not bound top-level
  // cid growth.)
  // NOTE(review): a strictly tight bound would also subtract the top-level
  // cid offset applied by CidFromTopLevelIndex — TODO confirm and tighten.
  if (tlc_top_ >= std::numeric_limits<classid_t>::max()) {
    FATAL1("Fatal error in ClassTable::RegisterTopLevel: invalid index %" Pd
           "\n",
           tlc_top_);
  }

  ASSERT(Thread::Current()->IsMutatorThread());

  // The class must not have been assigned a cid yet.
  const intptr_t index = cls.id();
  ASSERT(index == kIllegalCid);

  if (tlc_top_ == tlc_capacity_) {
    const intptr_t new_capacity = tlc_capacity_ + kCapacityIncrement;
    GrowTopLevel(new_capacity);
  }
  ASSERT(tlc_top_ < tlc_capacity_);
  cls.set_id(ClassTable::CidFromTopLevelIndex(tlc_top_));
  tlc_table_.load()[tlc_top_] = cls.raw();
  tlc_top_++;  // Increment next index.
}

intptr_t SharedClassTable::Register(intptr_t index, intptr_t size) {
if (!Class::is_valid_id(top_)) {
FATAL1("Fatal error in SharedClassTable::Register: invalid index %" Pd "\n",
Expand All @@ -202,6 +230,11 @@ intptr_t SharedClassTable::Register(intptr_t index, intptr_t size) {
}

void ClassTable::AllocateIndex(intptr_t index) {
if (IsTopLevelCid(index)) {
AllocateTopLevelIndex(index);
return;
}

// This is called by a snapshot reader.
shared_class_table_->AllocateIndex(index);
ASSERT(Class::is_valid_id(index));
Expand All @@ -220,6 +253,21 @@ void ClassTable::AllocateIndex(intptr_t index) {
ASSERT(capacity_ == shared_class_table_->capacity_);
}

// Reserves the top-level table slot corresponding to [cid], growing the
// table when the slot lies beyond the current capacity.
void ClassTable::AllocateTopLevelIndex(intptr_t cid) {
  ASSERT(IsTopLevelCid(cid));
  const intptr_t slot = IndexFromTopLevelCid(cid);

  // Make sure the backing store is large enough to address [slot].
  if (slot >= tlc_capacity_) {
    GrowTopLevel(slot + kCapacityIncrement);
  }

  // The slot must be unoccupied.
  ASSERT(tlc_table_.load()[slot] == nullptr);

  // Advance the high-water mark past the newly reserved slot if needed.
  if (tlc_top_ <= slot) {
    tlc_top_ = slot + 1;
  }
}

void ClassTable::Grow(intptr_t new_capacity) {
ASSERT(new_capacity > capacity_);

Expand All @@ -243,6 +291,29 @@ void ClassTable::Grow(intptr_t new_capacity) {
capacity_ = new_capacity;
}

// Grows the top-level class table to [new_capacity] slots. The retired
// backing store is parked in [old_class_tables_] instead of being freed,
// since readers may still hold a pointer to it.
void ClassTable::GrowTopLevel(intptr_t new_capacity) {
  ASSERT(new_capacity > tlc_capacity_);

  auto* const retired = tlc_table_.load();
  auto* const grown = static_cast<ClassPtr*>(
      malloc(new_capacity * sizeof(ClassPtr)));  // NOLINT

  intptr_t slot = 0;
  // Copy element-by-element: memmove would demote these relaxed atomic
  // operations to non-atomic ones.
  for (; slot < tlc_capacity_; ++slot) {
    grown[slot] = retired[slot];
  }
  // Likewise zero the fresh tail without resorting to memset.
  for (; slot < new_capacity; ++slot) {
    grown[slot] = 0;
  }
  old_class_tables_->Add(retired);

  tlc_table_.store(grown);
  tlc_capacity_ = new_capacity;
}

void SharedClassTable::AllocateIndex(intptr_t index) {
// This is called by a snapshot reader.
ASSERT(Class::is_valid_id(index));
Expand Down Expand Up @@ -320,9 +391,16 @@ void SharedClassTable::Grow(intptr_t new_capacity) {
capacity_ = new_capacity;
}

void ClassTable::Unregister(intptr_t index) {
shared_class_table_->Unregister(index);
table_.load()[index] = nullptr;
// Removes the class with [cid] from the regular (non-top-level) class table
// and from the shared size table.
void ClassTable::Unregister(intptr_t cid) {
  ASSERT(!IsTopLevelCid(cid));
  // Keep the shared table in sync before clearing our own entry.
  shared_class_table_->Unregister(cid);
  ClassPtr* entries = table_.load();
  entries[cid] = nullptr;
}

// Removes the top-level class with [cid] from the top-level class table.
void ClassTable::UnregisterTopLevel(intptr_t cid) {
  ASSERT(IsTopLevelCid(cid));
  tlc_table_.load()[IndexFromTopLevelCid(cid)] = nullptr;
}

void SharedClassTable::Unregister(intptr_t index) {
Expand Down Expand Up @@ -376,6 +454,12 @@ void ClassTable::VisitObjectPointers(ObjectPointerVisitor* visitor) {
ObjectPtr* to = reinterpret_cast<ObjectPtr*>(&table[top_ - 1]);
visitor->VisitPointers(from, to);
}
if (tlc_top_ != 0) {
auto* tlc_table = tlc_table_.load();
ObjectPtr* from = reinterpret_cast<ObjectPtr*>(&tlc_table[0]);
ObjectPtr* to = reinterpret_cast<ObjectPtr*>(&tlc_table[tlc_top_ - 1]);
visitor->VisitPointers(from, to);
}
visitor->clear_gc_root_type();
}

Expand Down Expand Up @@ -420,13 +504,18 @@ void ClassTable::Print() {
}
}

void ClassTable::SetAt(intptr_t index, ClassPtr raw_cls) {
// Installs [raw_cls] under [cid]. Called by the snapshot reader and the
// class finalizer. Top-level classes live in a separate table and carry no
// instance size, so the shared size table is only updated for regular cids.
void ClassTable::SetAt(intptr_t cid, ClassPtr raw_cls) {
  if (IsTopLevelCid(cid)) {
    const intptr_t tlc_index = IndexFromTopLevelCid(cid);
    tlc_table_.load()[tlc_index] = raw_cls;
    return;
  }

  ASSERT(cid < capacity_);
  // Mirror the instance size into the shared table; a null class clears it.
  intptr_t size = 0;
  if (raw_cls != nullptr) {
    size = Class::host_instance_size(raw_cls);
  }
  shared_class_table_->SetSizeAt(cid, size);
  table_.load()[cid] = raw_cls;
}

#ifndef PRODUCT
Expand Down
Loading

0 comments on commit d215ab6

Please sign in to comment.