Revert of Clean up aligned allocation code in preparation for SIMD alignments. (patchset #14 id:300001 of https://codereview.chromium.org/1150593003/)

Reason for revert:
Breaks mjsunit, webkit, mozilla, benchmarks.

[email protected]

Original issue's description:
> Clean up aligned allocation code in preparation for SIMD alignments.
>
> Moves alignment fill calculations into two static Heap methods.
> Adds a Heap method to handle the complex case where filler is potentially needed before and after a heap object.
> Makes DoubleAlignForDeserialization explicitly fill after an already
> aligned object.
>
> LOG=N
> BUG=v8:4124
>
> Committed: https://crrev.com/fcfb080eb9a637f0ae066bed4c45095e60df8a84
> Cr-Commit-Position: refs/heads/master@{#28687}

[email protected],[email protected]
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:4124

Review URL: https://codereview.chromium.org/1159123002

Cr-Commit-Position: refs/heads/master@{#28688}
bbudge authored and Commit bot committed May 28, 2015
1 parent fcfb080 commit 3ee926e
Showing 6 changed files with 82 additions and 247 deletions.
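
For readers skimming the diffs below: the revert restores the original scheme in which aligned allocations over-allocate by one word and Heap::EnsureAligned decides whether that spare word becomes a filler before or after the object. What follows is a minimal, self-contained sketch of that decision, not V8 code; kWordSize, kDoubleAlignment, and the Alignment enum are stand-ins for V8's kPointerSize, kDoubleSize/kDoubleAlignment, and AllocationAlignment on an assumed 32-bit target.

#include <cassert>
#include <cstdint>

// Stand-ins for V8's constants on a 32-bit target, where pointers are
// word (4-byte) aligned but doubles want 8-byte alignment.
constexpr intptr_t kWordSize = 4;
constexpr intptr_t kDoubleAlignment = 8;
constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

enum Alignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

// Mirrors the decision in the restored Heap::EnsureAligned: the caller has
// already over-allocated one word, and this returns the offset at which the
// object should start. Offset kWordSize means "write a one-word filler first";
// offset 0 means "the spare word becomes a filler after the object".
intptr_t ObjectStartOffset(intptr_t address, Alignment alignment) {
  bool misaligned = (address & kDoubleAlignmentMask) != 0;
  if (alignment == kDoubleAligned && misaligned) return kWordSize;
  if (alignment == kDoubleUnaligned && !misaligned) return kWordSize;
  return 0;
}

int main() {
  assert(ObjectStartOffset(0x1000, kDoubleAligned) == 0);          // already aligned
  assert(ObjectStartOffset(0x1004, kDoubleAligned) == kWordSize);  // pre-filler needed
  assert(ObjectStartOffset(0x1004, kDoubleUnaligned) == 0);        // already unaligned
  assert(ObjectStartOffset(0x1000, kDoubleUnaligned) == kWordSize);
  return 0;
}

(The kDoubleUnaligned case lets a HeapNumber's double value field, rather than the object start, land on a double-aligned address, per the STATIC_ASSERT at the top of the heap.cc hunk.)
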
57 changes: 17 additions & 40 deletions src/heap/heap.cc
@@ -1986,54 +1986,31 @@ STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
#endif


int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
case kWordAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
return kDoubleSize - kPointerSize;
default:
UNREACHABLE();
HeapObject* Heap::EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment) {
if (alignment == kDoubleAligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
return object;
}
return 0;
}


int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
intptr_t offset = OffsetFrom(address);
if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
return kPointerSize;
if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
return kDoubleSize - kPointerSize; // No fill if double is always aligned.
return 0;
}


HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
CreateFillerObjectAt(object->address(), filler_size);
return HeapObject::FromAddress(object->address() + filler_size);
}


HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK(filler_size > 0);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size)
CreateFillerObjectAt(object->address() + object_size, filler_size);
return object;
HeapObject* Heap::PrecedeWithFiller(HeapObject* object) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
}


HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
return AlignWithFiller(object, size, size + kPointerSize, kDoubleAligned);
return EnsureAligned(object, size, kDoubleAligned);
}


24 changes: 7 additions & 17 deletions src/heap/heap.h
@@ -716,23 +716,13 @@ class Heap {
MUST_USE_RESULT AllocationResult
CopyJSObject(JSObject* source, AllocationSite* site = NULL);

// Calculates the maximum amount of filler that could be required by the
// given alignment.
static int GetMaximumFillToAlign(AllocationAlignment alignment);
// Calculates the actual amount of filler required for a given address at the
// given alignment.
static int GetFillToAlign(Address address, AllocationAlignment alignment);

// Creates a filler object and returns a heap object immediately after it.
MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
int object_size,
int allocation_size,
AllocationAlignment alignment);
// This method assumes overallocation of one word. It will store a filler
// before the object if the given object is not double aligned, otherwise
// it will place the filler after the object.
MUST_USE_RESULT HeapObject* EnsureAligned(HeapObject* object, int size,
AllocationAlignment alignment);

MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object);

// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
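
The comments removed from heap.h above document the allocation pattern the reverted CL used, and which the removed lines of PagedSpace::AllocateRawAligned in the spaces-inl.h hunk below follow: reserve the worst-case filler up front, then split the slack into a pre-filler that establishes alignment and a post-filler that keeps the leftover memory iterable. Below is a rough, non-V8 sketch of that flow, reusing the stand-in constants and Alignment enum from the sketch above, with a hypothetical raw_allocate callback in place of the free-list/linear allocators.

// Worst-case filler a request might need (reverted Heap::GetMaximumFillToAlign,
// simplified: on this assumed 32-bit layout both double cases need one word).
int MaximumFillToAlign(Alignment alignment) {
  return alignment == kWordAligned ? 0 : kWordSize;
}

// Exact filler needed at a concrete address (reverted Heap::GetFillToAlign).
int FillToAlign(intptr_t address, Alignment alignment) {
  bool misaligned = (address & kDoubleAlignmentMask) != 0;
  if (alignment == kDoubleAligned && misaligned) return kWordSize;
  if (alignment == kDoubleUnaligned && !misaligned) return kWordSize;
  return 0;
}

// Caller pattern mirroring the reverted PagedSpace::AllocateRawAligned slow
// path: over-allocate by the worst case, put the needed filler before the
// object, and leave the remainder as a filler after it.
intptr_t AllocateAligned(intptr_t (*raw_allocate)(int), int object_size,
                         Alignment alignment) {
  int allocation_size = object_size + MaximumFillToAlign(alignment);
  intptr_t start = raw_allocate(allocation_size);
  int pre_filler = FillToAlign(start, alignment);
  int post_filler = allocation_size - object_size - pre_filler;
  // A real heap writes filler objects into both gaps (CreateFillerObjectAt);
  // this sketch only computes where the object itself begins.
  (void)post_filler;
  return start + pre_filler;
}
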
66 changes: 41 additions & 25 deletions src/heap/spaces-inl.h
@@ -250,21 +250,28 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);

Address new_top = current_top + filler_size + *size_in_bytes;
int alignment_size = 0;

if (alignment == kDoubleAligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(current_top) & kDoubleAlignmentMask) == 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
}
Address new_top = current_top + size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;

allocation_info_.set_top(new_top);
if (filler_size > 0) {
*size_in_bytes += filler_size;
return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
filler_size);
if (alignment_size > 0) {
return heap()->EnsureAligned(HeapObject::FromAddress(current_top),
size_in_bytes, alignment);
}

return HeapObject::FromAddress(current_top);
}

@@ -296,26 +303,21 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
int allocation_size = size_in_bytes;
HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
HeapObject* object = AllocateLinearlyAligned(size_in_bytes, alignment);
int aligned_size_in_bytes = size_in_bytes + kPointerSize;

if (object == NULL) {
// We don't know exactly how much filler we need to align until space is
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
object = free_list_.Allocate(allocation_size);
object = free_list_.Allocate(aligned_size_in_bytes);
if (object == NULL) {
object = SlowAllocateRaw(allocation_size);
object = SlowAllocateRaw(aligned_size_in_bytes);
}
if (object != NULL && filler_size != 0) {
object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
alignment);
if (object != NULL) {
object = heap()->EnsureAligned(object, aligned_size_in_bytes, alignment);
}
}

if (object != NULL) {
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
return object;
}

@@ -342,8 +344,19 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
int alignment_size = 0;
int aligned_size_in_bytes = 0;

// If double alignment is required and top pointer is not aligned, we allocate
// additional memory to take care of the alignment.
if (alignment == kDoubleAligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
alignment_size += kPointerSize;
} else if (alignment == kDoubleUnaligned &&
(OffsetFrom(old_top) & kDoubleAlignmentMask) == 0) {
alignment_size += kPointerSize;
}
aligned_size_in_bytes = size_in_bytes + alignment_size;

if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
return SlowAllocateRaw(size_in_bytes, alignment);
Expand All @@ -353,13 +366,16 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
if (alignment_size > 0) {
obj = heap()->PrecedeWithFiller(obj);
}

// The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

DCHECK((kDoubleAligned && (OffsetFrom(obj) & kDoubleAlignmentMask) == 0) ||
(kDoubleUnaligned && (OffsetFrom(obj) & kDoubleAlignmentMask) != 0));

return obj;
}

23 changes: 14 additions & 9 deletions src/heap/spaces.cc
@@ -1455,28 +1455,33 @@ AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
int alignment_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + alignment_size;

// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
int aligned_size = size_in_bytes;
aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;
Address new_top = old_top + aligned_size;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
UpdateInlineAllocationLimit(aligned_size);
top_on_previous_step_ = new_top;
if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
return AllocateRawAligned(size_in_bytes, alignment);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRawUnaligned(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
return AllocateRawAligned(size_in_bytes, alignment);
if (alignment == kDoubleAligned)
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
return AllocateRawUnaligned(size_in_bytes);
} else {
return AllocationResult::Retry();
}
7 changes: 3 additions & 4 deletions src/heap/spaces.h
@@ -1931,10 +1931,9 @@ class PagedSpace : public Space {
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);

// Generic fast case allocation function that tries aligned linear allocation
// at the address denoted by top in allocation_info_. Writes the aligned
// allocation size, which includes the filler size, to size_in_bytes.
inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
// Generic fast case allocation function that tries double aligned linear
// allocation at the address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearlyAligned(int size_in_bytes,
AllocationAlignment alignment);

// If sweeping is still in progress try to sweep unswept pages. If that is