diff --git a/doc/hotspot-style.html b/doc/hotspot-style.html index 0305bfeca03bd..9f26fc6636205 100644 --- a/doc/hotspot-style.html +++ b/doc/hotspot-style.html @@ -217,10 +217,10 @@

Source Files

should be put in the .hpp file, and not in the .inline.hpp file. This rule exists to resolve problems with circular dependencies between .inline.hpp files.

-
  • All .cpp files include precompiled.hpp as the first include -line.

  • -
  • precompiled.hpp is just a build time optimization, so don't rely -on it to resolve include problems.

  • +
  • Some build configurations use precompiled headers to speed up the +build times. The precompiled headers are included in the precompiled.hpp +file. Note that precompiled.hpp is just a build time optimization, so +don't rely on it to resolve include problems.

  • Keep the include lines alphabetically sorted.

  • Put conditional inclusions (#if ...) at the end of the include list.

  • diff --git a/doc/hotspot-style.md b/doc/hotspot-style.md index f5e59648cb23c..0150662981736 100644 --- a/doc/hotspot-style.md +++ b/doc/hotspot-style.md @@ -150,10 +150,10 @@ the first include line. Declarations needed by other files should be put in the .hpp file, and not in the .inline.hpp file. This rule exists to resolve problems with circular dependencies between .inline.hpp files. -* All .cpp files include precompiled.hpp as the first include line. - -* precompiled.hpp is just a build time optimization, so don't rely on -it to resolve include problems. +* Some build configurations use precompiled headers to speed up the +build times. The precompiled headers are included in the precompiled.hpp +file. Note that precompiled.hpp is just a build time optimization, so +don't rely on it to resolve include problems. * Keep the include lines alphabetically sorted. diff --git a/make/RunTests.gmk b/make/RunTests.gmk index 636d1ed18b23b..27a3c9b8b4540 100644 --- a/make/RunTests.gmk +++ b/make/RunTests.gmk @@ -78,6 +78,9 @@ $(eval $(call IncludeCustomExtension, RunTests.gmk)) # This is the JDK that we will test JDK_UNDER_TEST := $(JDK_IMAGE_DIR) +# The JDK used to compile jtreg test code. By default it is the same as +# JDK_UNDER_TEST. +JDK_FOR_COMPILE := $(JDK_IMAGE_DIR) TEST_RESULTS_DIR := $(OUTPUTDIR)/test-results TEST_SUPPORT_DIR := $(OUTPUTDIR)/test-support @@ -979,6 +982,7 @@ define SetupRunJtregTestBody $$(JTREG_JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \ -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \ $$($1_JTREG_BASIC_OPTIONS) \ + -compilejdk:$$(JDK_FOR_COMPILE) \ -testjdk:$$(JDK_UNDER_TEST) \ -dir:$$(JTREG_TOPDIR) \ -reportDir:$$($1_TEST_RESULTS_DIR) \ diff --git a/make/autoconf/basic.m4 b/make/autoconf/basic.m4 index 35eb63bea0c84..d897cbafba79f 100644 --- a/make/autoconf/basic.m4 +++ b/make/autoconf/basic.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. 
+# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -624,4 +624,10 @@ AC_DEFUN_ONCE([BASIC_POST_CONFIG_OUTPUT], # Make the compare script executable $CHMOD +x $OUTPUTDIR/compare.sh + + # Copy the linker wrapper script for clang on AIX and make it executable + if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then + $CP -f "$TOPDIR/make/scripts/aix/ld.sh" "$OUTPUTDIR/ld.sh" + $CHMOD +x "$OUTPUTDIR/ld.sh" + fi ]) diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4 index 38021267f42df..d0a74cadff3a2 100644 --- a/make/autoconf/flags-cflags.m4 +++ b/make/autoconf/flags-cflags.m4 @@ -278,7 +278,7 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS], AC_DEFUN([FLAGS_SETUP_QUALITY_CHECKS], [ # bounds, memory and behavior checking options - if test "x$TOOLCHAIN_TYPE" = xgcc; then + if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then case $DEBUG_LEVEL in release ) # no adjustment @@ -517,12 +517,6 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], -fvisibility=hidden -fno-strict-aliasing -fno-omit-frame-pointer" fi - if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then - # clang compiler on aix needs -ffunction-sections - TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno -fstack-protector" - TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char -fstack-protector" - fi - if test "x$TOOLCHAIN_TYPE" = xgcc; then TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector" TOOLCHAIN_CFLAGS_JDK="-fvisibility=hidden -pipe -fstack-protector" @@ -542,7 +536,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], # Restrict the debug information created by Clang to avoid # too big object files and speed the build up a little bit # (see http://llvm.org/bugs/show_bug.cgi?id=7554) - 
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info" + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info -fstack-protector" # In principle the stack alignment below is cpu- and ABI-dependent and # should agree with values of StackAlignmentInBytes in various @@ -560,7 +554,13 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER], TOOLCHAIN_CFLAGS_JDK="-pipe" TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing" # technically NOT for CXX fi - TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden" + + if test "x$OPENJDK_TARGET_OS" = xaix; then + TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno" + TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char" + fi + + TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden -fstack-protector" elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then # The -utf-8 option sets source and execution character sets to UTF-8 to enable correct diff --git a/make/autoconf/flags-ldflags.m4 b/make/autoconf/flags-ldflags.m4 index 2e060a71d4d05..90947494ec8af 100644 --- a/make/autoconf/flags-ldflags.m4 +++ b/make/autoconf/flags-ldflags.m4 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -79,7 +79,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER], fi if test "x$OPENJDK_TARGET_OS" = xaix; then BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-bnolibpath -Wl,-bnoexpall \ - -Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k" + -Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k -fuse-ld=$OUTPUTDIR/ld.sh" BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,-lC_r -Wl,-bbigtoc" fi diff --git a/make/common/Modules.gmk b/make/common/Modules.gmk index 8ae33b3641eae..c8c78ed8041f4 100644 --- a/make/common/Modules.gmk +++ b/make/common/Modules.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -92,7 +92,7 @@ SRC_SUBDIRS += share/classes SPEC_SUBDIRS += share/specs -MAN_SUBDIRS += share/man +MAN_SUBDIRS += share/man windows/man # Find all module-info.java files for the current build target platform and # configuration. diff --git a/make/modules/java.desktop/lib/ClientLibraries.gmk b/make/modules/java.desktop/lib/ClientLibraries.gmk index 41f3040222cc0..221d757898068 100644 --- a/make/modules/java.desktop/lib/ClientLibraries.gmk +++ b/make/modules/java.desktop/lib/ClientLibraries.gmk @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -360,8 +360,6 @@ else LIBFONTMANAGER_JDK_LIBS += libfreetype endif -LIBFONTMANAGER_OPTIMIZATION := HIGHEST - ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), ) # gcc (and to an extent clang) is particularly bad at optimizing these files, # causing a massive spike in compile time. We don't care about these @@ -372,7 +370,6 @@ endif ifeq ($(call isTargetOs, windows), true) LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c - LIBFONTMANAGER_OPTIMIZATION := HIGHEST else ifeq ($(call isTargetOs, macosx), true) LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c \ fontpath.c lcdglyph.c @@ -393,7 +390,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \ AccelGlyphCache.c, \ CFLAGS := $(LIBFONTMANAGER_CFLAGS), \ CXXFLAGS := $(LIBFONTMANAGER_CFLAGS), \ - OPTIMIZATION := $(LIBFONTMANAGER_OPTIMIZATION), \ + OPTIMIZATION := HIGHEST, \ CFLAGS_windows = -DCC_NOEX, \ EXTRA_HEADER_DIRS := $(LIBFONTMANAGER_EXTRA_HEADER_DIRS), \ EXTRA_SRC := $(LIBFONTMANAGER_EXTRA_SRC), \ diff --git a/make/scripts/aix/ld.sh b/make/scripts/aix/ld.sh new file mode 100644 index 0000000000000..faa77ce4ba5a8 --- /dev/null +++ b/make/scripts/aix/ld.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright (c) 2025 SAP SE. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +unset LIBPATH +exec /usr/bin/ld "$@" diff --git a/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp b/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp new file mode 100644 index 0000000000000..1830bdf4a88d6 --- /dev/null +++ b/src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_AARCH64_STUBDECLARATIONS_HPP +#define CPU_AARCH64_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 10000) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 2000) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 30000 ZGC_ONLY(+10000)) \ + do_stub(compiler, vector_iota_indices) \ + do_arch_entry(aarch64, compiler, vector_iota_indices, \ + vector_iota_indices, vector_iota_indices) \ + do_stub(compiler, large_array_equals) \ + do_arch_entry(aarch64, compiler, large_array_equals, \ + large_array_equals, large_array_equals) \ + do_stub(compiler, large_arrays_hashcode_boolean) \ + do_arch_entry(aarch64, compiler, large_arrays_hashcode_boolean, \ + large_arrays_hashcode_boolean, \ + large_arrays_hashcode_boolean) \ + do_stub(compiler, large_arrays_hashcode_byte) \ + do_arch_entry(aarch64, compiler, large_arrays_hashcode_byte, \ + large_arrays_hashcode_byte, \ + large_arrays_hashcode_byte) \ + do_stub(compiler, large_arrays_hashcode_char) \ + do_arch_entry(aarch64, compiler, large_arrays_hashcode_char, \ + large_arrays_hashcode_char, \ + large_arrays_hashcode_char) \ + do_stub(compiler, large_arrays_hashcode_short) \ + do_arch_entry(aarch64, compiler, large_arrays_hashcode_short, \ + large_arrays_hashcode_short, \ + large_arrays_hashcode_short) \ + do_stub(compiler, large_arrays_hashcode_int) \ + do_arch_entry(aarch64, compiler, large_arrays_hashcode_int, \ + large_arrays_hashcode_int, \ + large_arrays_hashcode_int) \ + do_stub(compiler, large_byte_array_inflate) \ + do_arch_entry(aarch64, compiler, large_byte_array_inflate, \ + large_byte_array_inflate, large_byte_array_inflate) \ + do_stub(compiler, count_positives) \ + do_arch_entry(aarch64, 
compiler, count_positives, count_positives, \ + count_positives) \ + do_stub(compiler, count_positives_long) \ + do_arch_entry(aarch64, compiler, count_positives_long, \ + count_positives_long, count_positives_long) \ + do_stub(compiler, compare_long_string_LL) \ + do_arch_entry(aarch64, compiler, compare_long_string_LL, \ + compare_long_string_LL, compare_long_string_LL) \ + do_stub(compiler, compare_long_string_UU) \ + do_arch_entry(aarch64, compiler, compare_long_string_UU, \ + compare_long_string_UU, compare_long_string_UU) \ + do_stub(compiler, compare_long_string_LU) \ + do_arch_entry(aarch64, compiler, compare_long_string_LU, \ + compare_long_string_LU, compare_long_string_LU) \ + do_stub(compiler, compare_long_string_UL) \ + do_arch_entry(aarch64, compiler, compare_long_string_UL, \ + compare_long_string_UL, compare_long_string_UL) \ + do_stub(compiler, string_indexof_linear_ll) \ + do_arch_entry(aarch64, compiler, string_indexof_linear_ll, \ + string_indexof_linear_ll, string_indexof_linear_ll) \ + do_stub(compiler, string_indexof_linear_uu) \ + do_arch_entry(aarch64, compiler, string_indexof_linear_uu, \ + string_indexof_linear_uu, string_indexof_linear_uu) \ + do_stub(compiler, string_indexof_linear_ul) \ + do_arch_entry(aarch64, compiler, string_indexof_linear_ul, \ + string_indexof_linear_ul, string_indexof_linear_ul) \ + /* this uses the entry for ghash_processBlocks */ \ + do_stub(compiler, ghash_processBlocks_wide) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 20000 ZGC_ONLY(+100000)) \ + do_stub(final, copy_byte_f) \ + do_arch_entry(aarch64, final, copy_byte_f, copy_byte_f, \ + copy_byte_f) \ + do_stub(final, copy_byte_b) \ + do_arch_entry(aarch64, final, copy_byte_b, copy_byte_b, \ + copy_byte_b) \ + do_stub(final, copy_oop_f) \ + do_arch_entry(aarch64, final, copy_oop_f, copy_oop_f, copy_oop_f) \ + do_stub(final, copy_oop_b) \ + do_arch_entry(aarch64, 
final, copy_oop_b, copy_oop_b, copy_oop_b) \ + do_stub(final, copy_oop_uninit_f) \ + do_arch_entry(aarch64, final, copy_oop_uninit_f, copy_oop_uninit_f, \ + copy_oop_uninit_f) \ + do_stub(final, copy_oop_uninit_b) \ + do_arch_entry(aarch64, final, copy_oop_uninit_b, copy_oop_uninit_b, \ + copy_oop_uninit_b) \ + do_stub(final, zero_blocks) \ + do_arch_entry(aarch64, final, zero_blocks, zero_blocks, \ + zero_blocks) \ + do_stub(final, spin_wait) \ + do_arch_entry_init(aarch64, final, spin_wait, spin_wait, \ + spin_wait, empty_spin_wait) \ + /* stub only -- entries are not stored in StubRoutines::aarch64 */ \ + /* n.b. these are not the same as the generic atomic stubs */ \ + do_stub(final, atomic_entry_points) \ + + +#endif // CPU_AARCH64_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 0986e45a0a2c2..23abbf180fa93 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -202,7 +202,8 @@ class StubGenerator: public StubCodeGenerator { (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Address sp_after_call (rfp, sp_after_call_off * wordSize); @@ -421,7 +422,8 @@ class StubGenerator: public StubCodeGenerator { // r0: exception oop address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // same as in generate_call_stub(): @@ -476,7 +478,8 @@ class StubGenerator: public StubCodeGenerator { // so it just needs to be generated code with no x86 prolog address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Upon entry, LR points to the return address returning into @@ -565,8 +568,8 @@ class StubGenerator: public StubCodeGenerator { // [tos + 4]: saved r0 // [tos + 5]: saved rscratch1 address generate_verify_oop() { - - StubCodeMark mark(this, "StubRoutines", "verify_oop"); + StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label exit, error; @@ -614,9 +617,9 @@ class StubGenerator: public StubCodeGenerator { } // Generate indices for iota vector. 
- address generate_iota_indices(const char *stub_name) { + address generate_iota_indices(StubGenStubId stub_id) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); // B __ emit_data64(0x0706050403020100, relocInfo::none); @@ -659,7 +662,8 @@ class StubGenerator: public StubCodeGenerator { Register base = r10, cnt = r11; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "zero_blocks"); + StubGenStubId stub_id = StubGenStubId::zero_blocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); if (UseBlockZeroing) { @@ -798,8 +802,39 @@ class StubGenerator: public StubCodeGenerator { // // s and d are adjusted to point to the remaining words to copy // - void generate_copy_longs(DecoratorSet decorators, BasicType type, Label &start, Register s, Register d, Register count, - copy_direction direction) { + void generate_copy_longs(StubGenStubId stub_id, DecoratorSet decorators, Label &start, Register s, Register d, Register count) { + BasicType type; + copy_direction direction; + + switch (stub_id) { + case copy_byte_f_id: + direction = copy_forwards; + type = T_BYTE; + break; + case copy_byte_b_id: + direction = copy_backwards; + type = T_BYTE; + break; + case copy_oop_f_id: + direction = copy_forwards; + type = T_OBJECT; + break; + case copy_oop_b_id: + direction = copy_backwards; + type = T_OBJECT; + break; + case copy_oop_uninit_f_id: + direction = copy_forwards; + type = T_OBJECT; + break; + case copy_oop_uninit_b_id: + direction = copy_backwards; + type = T_OBJECT; + break; + default: + ShouldNotReachHere(); + } + int unit = wordSize * direction; int bias = (UseSIMDForMemoryOps ? 
4:2) * wordSize; @@ -814,15 +849,10 @@ class StubGenerator: public StubCodeGenerator { assert_different_registers(s, d, count, rscratch1, rscratch2); Label again, drain; - const char *stub_name; - if (direction == copy_forwards) - stub_name = "forward_copy_longs"; - else - stub_name = "backward_copy_longs"; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); __ bind(start); @@ -1477,10 +1507,11 @@ class StubGenerator: public StubCodeGenerator { } // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // is_oop - true => oop array, so generate store check code - // name - stub name string + // stub_id - is used to name the stub and identify all details of + // how to perform the copy. + // + // entry - is assigned to the stub's post push entry point unless + // it is null // // Inputs: // c_rarg0 - source array address @@ -1491,16 +1522,96 @@ class StubGenerator: public StubCodeGenerator { // the hardware handle it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomically. // - // Side Effects: - // disjoint_int_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_int_oop_copy(). 
+ // Side Effects: entry is set to the (post push) entry point so it + // can be used by the corresponding conjoint copy + // method // - address generate_disjoint_copy(int size, bool aligned, bool is_oop, address *entry, - const char *name, bool dest_uninitialized = false) { + address generate_disjoint_copy(StubGenStubId stub_id, address *entry) { Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_reg = RegSet::of(s, d, count); + int size; + bool aligned; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + size = sizeof(jbyte); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + size = sizeof(jbyte); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_disjoint_arraycopy_id: + size = sizeof(jshort); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jshort_disjoint_arraycopy_id: + size = sizeof(jshort); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jint_disjoint_arraycopy_id: + size = sizeof(jint); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jint_disjoint_arraycopy_id: + size = sizeof(jint); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_disjoint_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case arrayof_jlong_disjoint_arraycopy + ShouldNotReachHere(); + break; + case arrayof_jlong_disjoint_arraycopy_id: + size = sizeof(jlong); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case arrayof_oop_disjoint_arraycopy_id: + size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + case arrayof_oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -1612,230 +1811,8 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); return start; -} - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_byte_copy(). 
// - address generate_conjoint_copy(int size, bool aligned, bool is_oop, address nooverlap_target, - address *entry, const char *name, - bool dest_uninitialized = false) { + // Side Effects: + // entry is set to the no-overlap entry point so it can be used by + // some other conjoint copy method + // + address generate_conjoint_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_regs = RegSet::of(s, d, count); - StubCodeMark mark(this, "StubRoutines", name); + int size; + bool aligned; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case jbyte_arraycopy_id: + size = sizeof(jbyte); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jbyte_arraycopy_id: + size = sizeof(jbyte); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_arraycopy_id: + size = sizeof(jshort); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jshort_arraycopy_id: + size = sizeof(jshort); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jint_arraycopy_id: + size = sizeof(jint); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jint_arraycopy_id: + size = sizeof(jint); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case arrayof_jlong_disjoint_arraycopy + ShouldNotReachHere(); + break; + case arrayof_jlong_arraycopy_id: + size = sizeof(jlong); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case oop_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case arrayof_oop_arraycopy_id: + size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + case arrayof_oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -1612,230 +1811,8 @@ class StubGenerator: public StubCodeGenerator { __ mov(r0, zr); // return 0 __ ret(lr); return start; -} - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_byte_copy(). 
- // - address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jbyte), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, - address* entry, const char *name) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jbyte), aligned, not_oop, nooverlap_target, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we - // let the hardware handle it. The two or four words within dwords - // or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_short_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_short_copy(). 
- // - address generate_disjoint_short_copy(bool aligned, - address* entry, const char *name) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jshort), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we - // let the hardware handle it. The two or four words within dwords - // or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - address generate_conjoint_short_copy(bool aligned, address nooverlap_target, - address *entry, const char *name) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jshort), aligned, not_oop, nooverlap_target, entry, name); - - } - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let - // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomically. - // - // Side Effects: - // disjoint_int_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_int_oop_copy(). 
- // - address generate_disjoint_int_copy(bool aligned, address *entry, - const char *name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let - // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomically. - // - address generate_conjoint_int_copy(bool aligned, address nooverlap_target, - address *entry, const char *name, - bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jint), aligned, not_oop, nooverlap_target, entry, name); - } - - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - // Side Effects: - // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the - // no-overlap entry point used by generate_conjoint_long_oop_copy(). 
- // - address generate_disjoint_long_copy(bool aligned, address *entry, - const char *name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jlong), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - address generate_conjoint_long_copy(bool aligned, - address nooverlap_target, address *entry, - const char *name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jlong), aligned, not_oop, nooverlap_target, entry, name); } - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - // Side Effects: - // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the - // no-overlap entry point used by generate_conjoint_long_oop_copy(). - // - address generate_disjoint_oop_copy(bool aligned, address *entry, - const char *name, bool dest_uninitialized) { - const bool is_oop = true; - const int size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); - return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - address generate_conjoint_oop_copy(bool aligned, - address nooverlap_target, address *entry, - const char *name, bool dest_uninitialized) { - const bool is_oop = true; - const int size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); - return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry, - name, dest_uninitialized); - } - - // Helper for generating a dynamic type check. // Smashes rscratch1, rscratch2. void generate_type_check(Register sub_klass, @@ -1873,8 +1850,18 @@ class StubGenerator: public StubCodeGenerator { // r0 == 0 - success // r0 == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(const char *name, address *entry, - bool dest_uninitialized = false) { + address generate_checkcast_copy(StubGenStubId stub_id, address *entry) { + bool dest_uninitialized; + switch (stub_id) { + case checkcast_arraycopy_id: + dest_uninitialized = false; + break; + case checkcast_arraycopy_uninit_id: + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } Label L_load_element, L_store_element, L_do_card_marks, L_done, L_done_pop; @@ -1908,7 +1895,7 @@ class StubGenerator: public StubCodeGenerator { copied_oop, r19_klass, count_save); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2080,16 +2067,17 @@ class StubGenerator: public StubCodeGenerator { // Examines the alignment of the operands and dispatches 
// to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char *name, - address byte_copy_entry, + address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + Label L_long_aligned, L_int_aligned, L_short_aligned; Register s = c_rarg0, d = c_rarg1, count = c_rarg2; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2133,10 +2121,10 @@ class StubGenerator: public StubCodeGenerator { // r0 == 0 - success // r0 == -1^K - failure, where K is partial transfer count // - address generate_generic_copy(const char *name, - address byte_copy_entry, address short_copy_entry, + address generate_generic_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { + StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; Label L_failed, L_objArray; Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; @@ -2154,7 +2142,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2407,9 +2395,41 @@ class StubGenerator: public StubCodeGenerator { // value: c_rarg1 // count: c_rarg2 treated as signed // - address generate_fill(BasicType t, bool aligned, const char *name) { + address generate_fill(StubGenStubId stub_id) { + BasicType t; + bool aligned; + + switch (stub_id) { + case jbyte_fill_id: + t = T_BYTE; + aligned = false; + break; + case jshort_fill_id: + t = T_SHORT; + aligned = false; + break; + case jint_fill_id: + t = T_INT; + aligned = false; + break; + case arrayof_jbyte_fill_id: + t = T_BYTE; + aligned = 
true; + break; + case arrayof_jshort_fill_id: + t = T_SHORT; + aligned = true; + break; + case arrayof_jint_fill_id: + t = T_INT; + aligned = true; + break; + default: + ShouldNotReachHere(); + }; + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -2551,7 +2571,8 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -2567,7 +2588,8 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubCodeMark mark(this, stub_id); // pre wbsync is a no-op // post wbsync translates to an sfence @@ -2593,61 +2615,44 @@ class StubGenerator: public StubCodeGenerator { address entry_jlong_arraycopy; address entry_checkcast_arraycopy; - generate_copy_longs(IN_HEAP | IS_ARRAY, T_BYTE, copy_f, r0, r1, r15, copy_forwards); - generate_copy_longs(IN_HEAP | IS_ARRAY, T_BYTE, copy_b, r0, r1, r15, copy_backwards); + generate_copy_longs(StubGenStubId::copy_byte_f_id, IN_HEAP | IS_ARRAY, copy_f, r0, r1, r15); + generate_copy_longs(StubGenStubId::copy_byte_b_id, IN_HEAP | IS_ARRAY, copy_b, r0, r1, r15); - generate_copy_longs(IN_HEAP | IS_ARRAY, T_OBJECT, copy_obj_f, r0, r1, r15, copy_forwards); - generate_copy_longs(IN_HEAP | IS_ARRAY, T_OBJECT, copy_obj_b, r0, r1, r15, copy_backwards); + generate_copy_longs(StubGenStubId::copy_oop_f_id, IN_HEAP | IS_ARRAY, copy_obj_f, r0, r1, r15); + generate_copy_longs(StubGenStubId::copy_oop_b_id, IN_HEAP | IS_ARRAY, copy_obj_b, r0, r1, r15); - generate_copy_longs(IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, T_OBJECT, copy_obj_uninit_f, r0, 
r1, r15, copy_forwards); - generate_copy_longs(IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, T_OBJECT, copy_obj_uninit_b, r0, r1, r15, copy_backwards); + generate_copy_longs(StubGenStubId::copy_oop_uninit_f_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_f, r0, r1, r15); + generate_copy_longs(StubGenStubId::copy_oop_uninit_b_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_b, r0, r1, r15); StubRoutines::aarch64::_zero_blocks = generate_zero_blocks(); //*** jbyte // Always need aligned and unaligned versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, - "jbyte_disjoint_arraycopy"); - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, - &entry_jbyte_arraycopy, - "jbyte_arraycopy"); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, - "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr, - "arrayof_jbyte_arraycopy"); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, entry, nullptr); //*** jshort // Always need aligned and unaligned versions - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, - "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, - &entry_jshort_arraycopy, - "jshort_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, - "arrayof_jshort_disjoint_arraycopy"); - 
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr, - "arrayof_jshort_arraycopy"); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::jshort_arraycopy_id, entry, &entry_jshort_arraycopy); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jshort_arraycopy_id, entry, nullptr); //*** jint // Aligned versions - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, - "arrayof_jint_disjoint_arraycopy"); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, - "arrayof_jint_arraycopy"); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 
// entry_jint_arraycopy always points to the unaligned version - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, - "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, - &entry_jint_arraycopy, - "jint_arraycopy"); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); + StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); //*** jlong // It is always aligned - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, - "arrayof_jlong_disjoint_arraycopy"); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, - "arrayof_jlong_arraycopy"); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; @@ -2658,18 +2663,14 @@ class StubGenerator: public StubCodeGenerator { bool aligned = !UseCompressedOops; StubRoutines::_arrayof_oop_disjoint_arraycopy - = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy", - /*dest_uninitialized*/false); + = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id, &entry); StubRoutines::_arrayof_oop_arraycopy - = generate_conjoint_oop_copy(aligned, entry, &entry_oop_arraycopy, "arrayof_oop_arraycopy", - /*dest_uninitialized*/false); + = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); // Aligned versions without pre-barriers StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit - = 
generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit", - /*dest_uninitialized*/true); + = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id, &entry); StubRoutines::_arrayof_oop_arraycopy_uninit - = generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit", - /*dest_uninitialized*/true); + = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id, entry, nullptr); } StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; @@ -2677,30 +2678,27 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, - /*dest_uninitialized*/true); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_generic_arraycopy = generate_generic_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_oop_arraycopy, entry_jlong_arraycopy, entry_checkcast_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); - StubRoutines::_jshort_fill = 
generate_fill(T_SHORT, false, "jshort_fill"); - StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); - StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); - StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); - StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); } void generate_math_stubs() { Unimplemented(); } @@ -2714,7 +2712,8 @@ class StubGenerator: public StubCodeGenerator { // address generate_aescrypt_encryptBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); const Register from = c_rarg0; // source array address const Register to = c_rarg1; // destination array address @@ -2747,7 +2746,8 @@ class StubGenerator: public StubCodeGenerator { address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES cryptographic extension support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_doLast; const Register from = c_rarg0; // source array address @@ -2785,7 +2785,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); __ 
align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -2889,7 +2890,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_decryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -3075,7 +3077,8 @@ class StubGenerator: public StubCodeGenerator { // Wide bulk encryption of whole blocks. __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); const address start = __ pc(); __ enter(); @@ -3284,7 +3287,8 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x87); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -3493,9 +3497,21 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_md5_implCompress(bool multi_block, const char *name) { + address generate_md5_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case md5_implCompress_id: + multi_block = false; + break; + case md5_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, 
"StubRoutines", name); + + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -3634,9 +3650,22 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha1_implCompress(bool multi_block, const char *name) { + address generate_sha1_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha1_implCompress_id: + multi_block = false; + break; + case sha1_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -3726,7 +3755,19 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha256_implCompress(bool multi_block, const char *name) { + address generate_sha256_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha256_implCompress_id: + multi_block = false; + break; + case sha256_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + static const uint32_t round_consts[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, @@ -3745,8 +3786,10 @@ class StubGenerator: public StubCodeGenerator { 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, }; + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -3868,7 +3911,19 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha512_implCompress(bool multi_block, const char *name) { + address generate_sha512_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case 
sha512_implCompress_id: + multi_block = false; + break; + case sha512_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + static const uint64_t round_consts[80] = { 0x428A2F98D728AE22L, 0x7137449123EF65CDL, 0xB5C0FBCFEC4D3B2FL, 0xE9B5DBA58189DBBCL, 0x3956C25BF348B538L, 0x59F111F1B605D019L, @@ -3900,7 +3955,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -4016,7 +4072,19 @@ class StubGenerator: public StubCodeGenerator { // c_rarg3 - int offset // c_rarg4 - int limit // - address generate_sha3_implCompress(bool multi_block, const char *name) { + address generate_sha3_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha3_implCompress_id: + multi_block = false; + break; + case sha3_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + static const uint64_t round_consts[24] = { 0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL, 0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L, @@ -4029,7 +4097,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -4246,7 +4315,8 @@ class StubGenerator: public StubCodeGenerator { assert(UseCRC32Intrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4291,7 +4361,8 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x0E0D0C0F0A09080BUL); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "chacha20Block"); + StubGenStubId 
stub_id = StubGenStubId::chacha20Block_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -4413,7 +4484,8 @@ class StubGenerator: public StubCodeGenerator { assert(UseCRC32CIntrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4451,7 +4523,8 @@ class StubGenerator: public StubCodeGenerator { */ address generate_updateBytesAdler32() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32"); + StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_simple_by1_loop, L_nmax, L_nmax_loop, L_by16, L_by16_loop, L_by1_loop, L_do_mod, L_combine, L_by1; @@ -4672,7 +4745,8 @@ class StubGenerator: public StubCodeGenerator { */ address generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register x = r0; @@ -4704,7 +4778,8 @@ class StubGenerator: public StubCodeGenerator { // faster than multiply_to_len on some CPUs and slower on others, but // multiply_to_len shows a bit better overall results __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "squareToLen"); + StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register x = r0; @@ -4737,7 +4812,8 @@ class StubGenerator: public StubCodeGenerator { address generate_mulAdd() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "mulAdd"); + StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4767,7 +4843,8 @@ class 
StubGenerator: public StubCodeGenerator { // address generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label ShiftSIMDLoop, ShiftTwoLoop, ShiftThree, ShiftTwo, ShiftOne, Exit; @@ -4889,7 +4966,8 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label ShiftSIMDLoop, ShiftTwoLoop, ShiftThree, ShiftTwo, ShiftOne, Exit; @@ -4997,7 +5075,8 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "count_positives"); + StubGenStubId stub_id = StubGenStubId::count_positives_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -5258,7 +5337,8 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "large_array_equals"); + StubGenStubId stub_id = StubGenStubId::large_array_equals_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); __ enter(); @@ -5383,29 +5463,29 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - const char *mark_name = ""; + StubGenStubId stub_id; switch (eltype) { case T_BOOLEAN: - mark_name = "_large_arrays_hashcode_boolean"; + stub_id = StubGenStubId::large_arrays_hashcode_boolean_id; break; case T_BYTE: - mark_name = "_large_arrays_hashcode_byte"; + stub_id = StubGenStubId::large_arrays_hashcode_byte_id; break; case T_CHAR: - mark_name = "_large_arrays_hashcode_char"; + stub_id = StubGenStubId::large_arrays_hashcode_char_id; break; case T_SHORT: - mark_name = 
"_large_arrays_hashcode_short"; + stub_id = StubGenStubId::large_arrays_hashcode_short_id; break; case T_INT: - mark_name = "_large_arrays_hashcode_int"; + stub_id = StubGenStubId::large_arrays_hashcode_int_id; break; default: - mark_name = "_large_arrays_hashcode_incorrect_type"; - __ should_not_reach_here(); + stub_id = StubGenStubId::NO_STUBID; + ShouldNotReachHere(); }; - StubCodeMark mark(this, "StubRoutines", mark_name); + StubCodeMark mark(this, stub_id); address entry = __ pc(); __ enter(); @@ -5638,7 +5718,8 @@ class StubGenerator: public StubCodeGenerator { address generate_dsin_dcos(bool isCos) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", isCos ? "libmDcos" : "libmDsin"); + StubGenStubId stub_id = (isCos ? StubGenStubId::dcos_id : StubGenStubId::dsin_id); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ generate_dsin_dcos(isCos, (address)StubRoutines::aarch64::_npio2_hw, (address)StubRoutines::aarch64::_two_over_pi, @@ -5689,9 +5770,8 @@ class StubGenerator: public StubCodeGenerator { // r11 = tmp2 address generate_compare_long_string_different_encoding(bool isLU) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", isLU - ? "compare_long_string_different_encoding LU" - : "compare_long_string_different_encoding UL"); + StubGenStubId stub_id = (isLU ? 
StubGenStubId::compare_long_string_LU_id : StubGenStubId::compare_long_string_UL_id); + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2, DONE, CALCULATE_DIFFERENCE, LARGE_LOOP_PREFETCH, NO_PREFETCH, @@ -5800,7 +5880,8 @@ class StubGenerator: public StubCodeGenerator { // v1 = temporary float register address generate_float16ToFloat() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "float16ToFloat"); + StubGenStubId stub_id = StubGenStubId::hf2f_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); __ flt16_to_flt(v0, r0, v1); @@ -5813,7 +5894,8 @@ class StubGenerator: public StubCodeGenerator { // v1 = temporary float register address generate_floatToFloat16() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "floatToFloat16"); + StubGenStubId stub_id = StubGenStubId::f2hf_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); __ flt_to_flt16(r0, v0, v1); @@ -5823,7 +5905,8 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -5888,9 +5971,8 @@ class StubGenerator: public StubCodeGenerator { // r11 = tmp2 address generate_compare_long_string_same_encoding(bool isLL) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", isLL - ? "compare_long_string_same_encoding LL" - : "compare_long_string_same_encoding UU"); + StubGenStubId stub_id = (isLL ? 
StubGenStubId::compare_long_string_LL_id : StubGenStubId::compare_long_string_UU_id); + StubCodeMark mark(this, stub_id); address entry = __ pc(); Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4, tmp1 = r10, tmp2 = r11, tmp1h = rscratch1, tmp2h = rscratch2; @@ -6020,6 +6102,15 @@ class StubGenerator: public StubCodeGenerator { // p0 = pgtmp1 // p1 = pgtmp2 address generate_compare_long_string_sve(string_compare_mode mode) { + StubGenStubId stub_id; + switch (mode) { + case LL: stub_id = StubGenStubId::compare_long_string_LL_id; break; + case LU: stub_id = StubGenStubId::compare_long_string_LU_id; break; + case UL: stub_id = StubGenStubId::compare_long_string_UL_id; break; + case UU: stub_id = StubGenStubId::compare_long_string_UU_id; break; + default: ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); address entry = __ pc(); Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4, @@ -6055,16 +6146,7 @@ class StubGenerator: public StubCodeGenerator { ShouldNotReachHere(); \ } - const char* stubname; - switch (mode) { - case LL: stubname = "compare_long_string_same_encoding LL"; break; - case LU: stubname = "compare_long_string_different_encoding LU"; break; - case UL: stubname = "compare_long_string_different_encoding UL"; break; - case UU: stubname = "compare_long_string_same_encoding UU"; break; - default: ShouldNotReachHere(); - } - - StubCodeMark mark(this, "StubRoutines", stubname); + StubCodeMark mark(this, stub_id); __ mov(idx, 0); __ sve_whilelt(pgtmp1, mode == LL ? __ B : __ H, idx, cnt); @@ -6156,11 +6238,22 @@ class StubGenerator: public StubCodeGenerator { // larger and a bit less readable, however, most of extra operations are // issued during loads or branches, so, penalty is minimal address generate_string_indexof_linear(bool str1_isL, bool str2_isL) { - const char* stubName = str1_isL - ? (str2_isL ? 
"indexof_linear_ll" : "indexof_linear_ul") - : "indexof_linear_uu"; + StubGenStubId stub_id; + if (str1_isL) { + if (str2_isL) { + stub_id = StubGenStubId::string_indexof_linear_ll_id; + } else { + stub_id = StubGenStubId::string_indexof_linear_ul_id; + } + } else { + if (str2_isL) { + ShouldNotReachHere(); + } else { + stub_id = StubGenStubId::string_indexof_linear_uu_id; + } + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stubName); + StubCodeMark mark(this, stub_id); address entry = __ pc(); int str1_chr_size = str1_isL ? 1 : 2; @@ -6458,7 +6551,8 @@ class StubGenerator: public StubCodeGenerator { // Clobbers: r0, r1, r3, rscratch1, rflags, v0-v6 address generate_large_byte_array_inflate() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "large_byte_array_inflate"); + StubGenStubId stub_id = StubGenStubId::large_byte_array_inflate_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label LOOP, LOOP_START, LOOP_PRFM, LOOP_PRFM_START, DONE; Register src = r0, dst = r1, len = r2, octetCounter = r3; @@ -6523,7 +6617,8 @@ class StubGenerator: public StubCodeGenerator { // that) and keep the data in little-endian bit order through the // calculation, bit-reversing the inputs and outputs. 
- StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubCodeMark mark(this, stub_id); __ align(wordSize * 2); address p = __ pc(); __ emit_int64(0x87); // The low-order bits of the field @@ -6589,7 +6684,8 @@ class StubGenerator: public StubCodeGenerator { address generate_ghash_processBlocks_wide() { address small = generate_ghash_processBlocks(); - StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks_wide"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_wide_id; + StubCodeMark mark(this, stub_id); __ align(wordSize * 2); address p = __ pc(); __ emit_int64(0x87); // The low-order bits of the field @@ -6700,7 +6796,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "encodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Register src = c_rarg0; // source array @@ -6968,7 +7065,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "decodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Register src = c_rarg0; // source array @@ -7084,7 +7182,8 @@ class StubGenerator: public StubCodeGenerator { // Support for spin waits. 
address generate_spin_wait() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "spin_wait"); + StubGenStubId stub_id = StubGenStubId::spin_wait_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ spin_wait(); @@ -7093,10 +7192,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); + void generate_lookup_secondary_supers_table_stub() { + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubCodeMark mark(this, stub_id); - address start = __ pc(); const Register r_super_klass = r0, r_array_base = r1, @@ -7108,21 +7207,23 @@ class StubGenerator: public StubCodeGenerator { const FloatRegister vtemp = v0; - Label L_success; - __ enter(); - __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, - r_array_base, r_array_length, r_array_index, - vtemp, result, super_klass_index, - /*stub_is_near*/true); - __ leave(); - __ ret(lr); - - return start; + for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + Label L_success; + __ enter(); + __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, + r_array_base, r_array_length, r_array_index, + vtemp, result, slot, + /*stub_is_near*/true); + __ leave(); + __ ret(lr); + } } // Slow path implementation for UseSecondarySupersTable. address generate_lookup_secondary_supers_table_slow_path_stub() { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path"); + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register @@ -7275,9 +7376,9 @@ class StubGenerator: public StubCodeGenerator { if (! 
UseLSE) { return; } - __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "atomic entry points"); + StubGenStubId stub_id = StubGenStubId::atomic_entry_points_id; + StubCodeMark mark(this, stub_id); address first_entry = __ pc(); // ADD, memory_order_conservative @@ -7436,7 +7537,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_thaw() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines", "Cont thaw"); + StubGenStubId stub_id = StubGenStubId::cont_thaw_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_top); return start; @@ -7446,7 +7548,8 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; // TODO: will probably need multiple return barriers depending on return type - StubCodeMark mark(this, "StubRoutines", "cont return barrier"); + StubGenStubId stub_id = StubGenStubId::cont_returnBarrier_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier); @@ -7457,7 +7560,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier_exception() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines", "cont return barrier exception handler"); + StubGenStubId stub_id = StubGenStubId::cont_returnBarrierExc_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier_exception); @@ -7467,7 +7571,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines","Continuation preempt stub"); + StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ reset_last_Java_frame(true); @@ -7549,7 +7654,8 @@ class StubGenerator: public StubCodeGenerator { 
address generate_poly1305_processBlocks() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "poly1305_processBlocks"); + StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label here; __ enter(); @@ -7663,7 +7769,8 @@ class StubGenerator: public StubCodeGenerator { // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Native caller has no idea how to handle exceptions, @@ -7680,7 +7787,8 @@ class StubGenerator: public StubCodeGenerator { // j_rarg0 = jobject receiver // rmethod = result address generate_upcall_stub_load_target() { - StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ resolve_global_jobject(j_rarg0, rscratch1, rscratch2); @@ -8669,9 +8777,8 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::aarch64::_spin_wait = generate_spin_wait(); - if (UsePoly1305Intrinsics) { - StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks(); - } + StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler(); + StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target(); #if defined (LINUX) && !defined (__ARM_FEATURE_ATOMICS) @@ -8683,17 +8790,11 @@ class StubGenerator: public StubCodeGenerator { if (UseSecondarySupersTable) { StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); if (! 
InlineSecondarySupersTest) { - for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] - = generate_lookup_secondary_supers_table_stub(slot); - } + generate_lookup_secondary_supers_table_stub(); } } #endif - StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler(); - StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target(); - StubRoutines::aarch64::set_completed(); // Inidicate that arraycopy and zero_blocks stubs are generated } @@ -8701,7 +8802,7 @@ class StubGenerator: public StubCodeGenerator { #if COMPILER2_OR_JVMCI if (UseSVE == 0) { - StubRoutines::aarch64::_vector_iota_indices = generate_iota_indices("iota_indices"); + StubRoutines::aarch64::_vector_iota_indices = generate_iota_indices(StubGenStubId::vector_iota_indices_id); } // array equals stub for large arrays. @@ -8745,13 +8846,15 @@ class StubGenerator: public StubCodeGenerator { } if (UseMontgomeryMultiplyIntrinsic) { - StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply"); + StubGenStubId stub_id = StubGenStubId::montgomeryMultiply_id; + StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); StubRoutines::_montgomeryMultiply = g.generate_multiply(); } if (UseMontgomerySquareIntrinsic) { - StubCodeMark mark(this, "StubRoutines", "montgomerySquare"); + StubGenStubId stub_id = StubGenStubId::montgomerySquare_id; + StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); // We use generate_multiply() rather than generate_square() // because it's faster for the sizes of modulus we care about. 
@@ -8791,24 +8894,28 @@ class StubGenerator: public StubCodeGenerator { } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress"); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB"); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); + StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); } if (UseSHA1Intrinsics) { - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); } if (UseSHA256Intrinsics) { - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { - StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); - StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); } if (UseSHA3Intrinsics) { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress(false, "sha3_implCompress"); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(true, 
"sha3_implCompressMB"); + StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id); + } + + if (UsePoly1305Intrinsics) { + StubRoutines::_poly1305_processBlocks = generate_poly1305_processBlocks(); } // generate Adler32 intrinsics code @@ -8820,29 +8927,29 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp index 407e3d70af9ac..3fa1616bf6586 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp @@ -29,40 +29,22 @@ #include "runtime/stubRoutines.hpp" #include "utilities/globalDefinitions.hpp" -// Implementation of the platform-specific part of StubRoutines - for -// a description of how to extend it, see the stubRoutines.hpp file. 
+// function used as default for spin_wait stub -address StubRoutines::aarch64::_get_previous_sp_entry = nullptr; +static void empty_spin_wait() { } -address StubRoutines::aarch64::_f2i_fixup = nullptr; -address StubRoutines::aarch64::_f2l_fixup = nullptr; -address StubRoutines::aarch64::_d2i_fixup = nullptr; -address StubRoutines::aarch64::_d2l_fixup = nullptr; -address StubRoutines::aarch64::_vector_iota_indices = nullptr; -address StubRoutines::aarch64::_float_sign_mask = nullptr; -address StubRoutines::aarch64::_float_sign_flip = nullptr; -address StubRoutines::aarch64::_double_sign_mask = nullptr; -address StubRoutines::aarch64::_double_sign_flip = nullptr; -address StubRoutines::aarch64::_zero_blocks = nullptr; -address StubRoutines::aarch64::_count_positives = nullptr; -address StubRoutines::aarch64::_count_positives_long = nullptr; -address StubRoutines::aarch64::_large_array_equals = nullptr; -address StubRoutines::aarch64::_large_arrays_hashcode_boolean = nullptr; -address StubRoutines::aarch64::_large_arrays_hashcode_byte = nullptr; -address StubRoutines::aarch64::_large_arrays_hashcode_char = nullptr; -address StubRoutines::aarch64::_large_arrays_hashcode_int = nullptr; -address StubRoutines::aarch64::_large_arrays_hashcode_short = nullptr; -address StubRoutines::aarch64::_compare_long_string_LL = nullptr; -address StubRoutines::aarch64::_compare_long_string_UU = nullptr; -address StubRoutines::aarch64::_compare_long_string_LU = nullptr; -address StubRoutines::aarch64::_compare_long_string_UL = nullptr; -address StubRoutines::aarch64::_string_indexof_linear_ll = nullptr; -address StubRoutines::aarch64::_string_indexof_linear_uu = nullptr; -address StubRoutines::aarch64::_string_indexof_linear_ul = nullptr; -address StubRoutines::aarch64::_large_byte_array_inflate = nullptr; +// define fields for arch-specific entries -static void empty_spin_wait() { } -address StubRoutines::aarch64::_spin_wait = CAST_FROM_FN_PTR(address, empty_spin_wait); +#define 
DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr; + +#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); + +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) + +#undef DEFINE_ARCH_ENTRY_INIT +#undef DEFINE_ARCH_ENTRY bool StubRoutines::aarch64::_completed = false; diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp index 7d3b72a88363d..a5ed87cdca454 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.hpp @@ -34,134 +34,66 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + enum platform_dependent_constants { - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 10000, - _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 30000 ZGC_ONLY(+10000), - _final_stubs_code_size = 20000 ZGC_ONLY(+100000) + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + class aarch64 { friend class StubGenerator; +#if INCLUDE_JVMCI + friend class JVMCIVMStructs; +#endif - private: - static address _get_previous_sp_entry; - - static address _f2i_fixup; - static address _f2l_fixup; - static address _d2i_fixup; - static address _d2l_fixup; - - static address _vector_iota_indices; - static address _float_sign_mask; - static address _float_sign_flip; - static address _double_sign_mask; - static address _double_sign_flip; - - static address _zero_blocks; - - static address _large_array_equals; - static address _large_arrays_hashcode_boolean; 
- static address _large_arrays_hashcode_byte; - static address _large_arrays_hashcode_char; - static address _large_arrays_hashcode_int; - static address _large_arrays_hashcode_short; - static address _compare_long_string_LL; - static address _compare_long_string_LU; - static address _compare_long_string_UL; - static address _compare_long_string_UU; - static address _string_indexof_linear_ll; - static address _string_indexof_linear_uu; - static address _string_indexof_linear_ul; - static address _large_byte_array_inflate; - - static address _spin_wait; - - static bool _completed; - - public: - - static address _count_positives; - static address _count_positives_long; - - static address get_previous_sp_entry() - { - return _get_previous_sp_entry; - } + // declare fields for arch-specific entries - static address f2i_fixup() - { - return _f2i_fixup; - } +#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name) ; - static address f2l_fixup() - { - return _f2l_fixup; - } +#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) - static address d2i_fixup() - { - return _d2i_fixup; - } - - static address d2l_fixup() - { - return _d2l_fixup; - } +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) - static address vector_iota_indices() { - return _vector_iota_indices; - } +#undef DECLARE_ARCH_ENTRY_INIT +#undef DECLARE_ARCH_ENTRY - static address float_sign_mask() - { - return _float_sign_mask; - } - - static address float_sign_flip() - { - return _float_sign_flip; - } + static bool _completed; - static address double_sign_mask() - { - return _double_sign_mask; - } + public: - static address double_sign_flip() - { - return _double_sign_flip; - } + // declare getters for arch-specific entries - static address zero_blocks() { - return _zero_blocks; - } 
+#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name) ; } - static address count_positives() { - return _count_positives; - } +#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - static address count_positives_long() { - return _count_positives_long; - } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) - static address large_array_equals() { - return _large_array_equals; - } +#undef DEFINE_ARCH_ENTRY_GETTER_INIT +#undef DEFINE_ARCH_ENTRY_GETTER static address large_arrays_hashcode(BasicType eltype) { switch (eltype) { case T_BOOLEAN: - return _large_arrays_hashcode_boolean; + return large_arrays_hashcode_boolean(); case T_BYTE: - return _large_arrays_hashcode_byte; + return large_arrays_hashcode_byte(); case T_CHAR: - return _large_arrays_hashcode_char; + return large_arrays_hashcode_char(); case T_SHORT: - return _large_arrays_hashcode_short; + return large_arrays_hashcode_short(); case T_INT: - return _large_arrays_hashcode_int; + return large_arrays_hashcode_int(); default: ShouldNotReachHere(); } @@ -169,42 +101,6 @@ class aarch64 { return nullptr; } - static address compare_long_string_LL() { - return _compare_long_string_LL; - } - - static address compare_long_string_LU() { - return _compare_long_string_LU; - } - - static address compare_long_string_UL() { - return _compare_long_string_UL; - } - - static address compare_long_string_UU() { - return _compare_long_string_UU; - } - - static address string_indexof_linear_ul() { - return _string_indexof_linear_ul; - } - - static address string_indexof_linear_ll() { - return _string_indexof_linear_ll; - } - - static address string_indexof_linear_uu() { - return _string_indexof_linear_uu; - } - - static address large_byte_array_inflate() { - 
return _large_byte_array_inflate; - } - - static address spin_wait() { - return _spin_wait; - } - static bool complete() { return _completed; } diff --git a/src/hotspot/cpu/arm/stubDeclarations_arm.hpp b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp new file mode 100644 index 0000000000000..35df4b924d276 --- /dev/null +++ b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_ARM_STUBDECLARATIONS_HPP +#define CPU_ARM_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 9000) \ + do_stub(initial, idiv_irem) \ + do_arch_entry(Arm, initial, idiv_irem, \ + idiv_irem_entry, idiv_irem_entry) \ + do_stub(initial, atomic_load_long) \ + do_arch_entry(Arm, initial, atomic_load_long, \ + atomic_load_long_entry, atomic_load_long_entry) \ + do_stub(initial, atomic_store_long) \ + do_arch_entry(Arm, initial, atomic_load_long, \ + atomic_store_long_entry, atomic_store_long_entry) \ + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 2000) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 22000) \ + do_stub(compiler, partial_subtype_check) \ + do_arch_entry(Arm, compiler, partial_subtype_check, \ + partial_subtype_check, partial_subtype_check) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 22000) \ + + +#endif // CPU_ARM_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp index be550d818c00c..aad81e7891d46 100644 --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -172,7 +172,8 @@ class StubGenerator: public StubCodeGenerator { private: address generate_call_stub(address& return_address) { - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -251,7 +252,8 @@ class StubGenerator: public StubCodeGenerator { // (in) Rexception_obj: exception oop address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", 
"catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset())); @@ -263,7 +265,8 @@ class StubGenerator: public StubCodeGenerator { // (in) Rexception_pc: return address address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ mov(c_rarg0, Rthread); @@ -312,6 +315,8 @@ class StubGenerator: public StubCodeGenerator { Register tmp = LR; assert(dividend == remainder, "must be"); + StubGenStubId stub_id = StubGenStubId::idiv_irem_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Check for special cases: divisor <= 0 or dividend < 0 @@ -453,7 +458,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_add() { address start; - StubCodeMark mark(this, "StubRoutines", "atomic_add"); + StubGenStubId stub_id = StubGenStubId::atomic_add_id; + StubCodeMark mark(this, stub_id); Label retry; start = __ pc(); Register addval = R0; @@ -504,7 +510,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_xchg() { address start; - StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); + StubGenStubId stub_id = StubGenStubId::atomic_xchg_id; + StubCodeMark mark(this, stub_id); start = __ pc(); Register newval = R0; Register dest = R1; @@ -554,7 +561,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_cmpxchg() { address start; - StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); + StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_id; + StubCodeMark mark(this, stub_id); start = __ pc(); Register cmp = R0; Register newval = R1; @@ -592,7 +600,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_cmpxchg_long() { address start; - StubCodeMark 
mark(this, "StubRoutines", "atomic_cmpxchg_long"); + StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_long_id; + StubCodeMark mark(this, stub_id); start = __ pc(); Register cmp_lo = R0; Register cmp_hi = R1; @@ -629,7 +638,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_load_long() { address start; - StubCodeMark mark(this, "StubRoutines", "atomic_load_long"); + StubGenStubId stub_id = StubGenStubId::atomic_load_long_id; + StubCodeMark mark(this, stub_id); start = __ pc(); Register result_lo = R0; Register result_hi = R1; @@ -653,7 +663,8 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_store_long() { address start; - StubCodeMark mark(this, "StubRoutines", "atomic_store_long"); + StubGenStubId stub_id = StubGenStubId::atomic_store_long_id; + StubCodeMark mark(this, stub_id); start = __ pc(); Register newval_lo = R0; Register newval_hi = R1; @@ -695,7 +706,8 @@ class StubGenerator: public StubCodeGenerator { // raddr: LR, blown by call address generate_partial_subtype_check() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); + StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops) @@ -784,7 +796,8 @@ class StubGenerator: public StubCodeGenerator { // Non-destructive plausibility checks for oops address generate_verify_oop() { - StubCodeMark mark(this, "StubRoutines", "verify_oop"); + StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Incoming arguments: @@ -1985,6 +1998,23 @@ class StubGenerator: public StubCodeGenerator { return start_pc; } + /* Internal development flag */ + /* enabled by defining TEST_C2_GENERIC_ARRAYCOPY */ + + // With this flag, the C2 stubs are tested by generating calls to + // generic_arraycopy instead of 
Runtime1::arraycopy + + // Runtime1::arraycopy return a status in R0 (0 if OK, else ~copied) + // and the result is tested to see whether the arraycopy stub should + // be called. + + // When we test arraycopy this way, we must generate extra code in the + // arraycopy methods callable from C2 generic_arraycopy to set the + // status to 0 for those who always succeed (calling the slow path stub might + // lead to errors since the copy has already been performed). + + static const bool set_status; + // // Generate stub for primitive array copy. If "aligned" is true, the // "from" and "to" addresses are assumed to be heapword aligned. @@ -1997,9 +2027,109 @@ class StubGenerator: public StubCodeGenerator { // to: R1 // count: R2 treated as signed 32-bit int // - address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = nullptr) { + address generate_primitive_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) { + bool aligned; + bool status; + int bytes_per_count; + bool disjoint; + + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 1; + disjoint = true; + break; + case jshort_disjoint_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 2; + disjoint = true; + break; + case jint_disjoint_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 4; + disjoint = true; + break; + case jlong_disjoint_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 8; + disjoint = true; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 1; + disjoint = true; + break; + case arrayof_jshort_disjoint_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 2; + disjoint = true; + break; + case arrayof_jint_disjoint_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 4; + disjoint = 
true; + break; + case arrayof_jlong_disjoint_arraycopy_id: + aligned = false; + status = set_status; + bytes_per_count = 8; + disjoint = true; + break; + case jbyte_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 1; + disjoint = false; + break; + case jshort_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 2; + disjoint = false; + break; + case jint_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 4; + disjoint = false; + break; + case jlong_arraycopy_id: + aligned = false; + status = true; + bytes_per_count = 8; + disjoint = false; + break; + case arrayof_jbyte_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 1; + disjoint = false; + break; + case arrayof_jshort_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 2; + disjoint = false; + break; + case arrayof_jint_arraycopy_id: + aligned = true; + status = set_status; + bytes_per_count = 4; + disjoint = false; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = R0; // source array address @@ -2171,9 +2301,38 @@ class StubGenerator: public StubCodeGenerator { // to: R1 // count: R2 treated as signed 32-bit int // - address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = nullptr) { + address generate_oop_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) { + bool aligned; + bool status; + bool disjoint; + + switch (stub_id) { + case oop_disjoint_arraycopy_id: + aligned = false; + status = true; + disjoint = true; + break; + case arrayof_oop_disjoint_arraycopy_id: + aligned = true; + status = set_status; + disjoint = true; + break; + case oop_arraycopy_id: + aligned = false; + status = true; + disjoint = false; + break; + case arrayof_oop_arraycopy_id: + aligned = true; + status 
= set_status; + disjoint = false; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register from = R0; @@ -2308,7 +2467,7 @@ class StubGenerator: public StubCodeGenerator { // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char* name) { + address generate_unsafe_copy() { const Register R0_from = R0; // source array address const Register R1_to = R1; // destination array address @@ -2317,7 +2476,8 @@ class StubGenerator: public StubCodeGenerator { const Register R3_bits = R3; // test copy of low bits __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register tmp = Rtemp; @@ -2442,9 +2602,10 @@ class StubGenerator: public StubCodeGenerator { // ckval: R4 (super_klass) // ret: R0 zero for success; (-1^K) where K is partial transfer count (32-bit) // - address generate_checkcast_copy(const char * name) { + address generate_checkcast_copy() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::checkcast_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = R0; // source array address @@ -2595,7 +2756,7 @@ class StubGenerator: public StubCodeGenerator { // R0 == 0 - success // R0 < 0 - need to call System.arraycopy // - address generate_generic_copy(const char *name) { + address generate_generic_copy() { Label L_failed, L_objArray; // Input registers @@ -2611,7 +2772,8 @@ class StubGenerator: public StubCodeGenerator { const Register R8_temp = R8; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = 
StubGenStubId::generic_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ zap_high_non_significant_bits(R1); @@ -2842,72 +3004,55 @@ class StubGenerator: public StubCodeGenerator { // Note: the disjoint stubs must be generated first, some of // the conjoint stubs use them. - bool status = false; // non failing C2 stubs need not return a status in R0 - -#ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */ - // With this flag, the C2 stubs are tested by generating calls to - // generic_arraycopy instead of Runtime1::arraycopy - - // Runtime1::arraycopy return a status in R0 (0 if OK, else ~copied) - // and the result is tested to see whether the arraycopy stub should - // be called. - - // When we test arraycopy this way, we must generate extra code in the - // arraycopy methods callable from C2 generic_arraycopy to set the - // status to 0 for those who always succeed (calling the slow path stub might - // lead to errors since the copy has already been performed). 
- - status = true; // generate a status compatible with C1 calls -#endif - address ucm_common_error_exit = generate_unsafecopy_common_error_exit(); UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); // these need always status in case they are called from generic_arraycopy - StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(false, "jbyte_disjoint_arraycopy", true, 1, true); - StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true); - StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(false, "jint_disjoint_arraycopy", true, 4, true); - StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(false, "jlong_disjoint_arraycopy", true, 8, true); - StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (false, "oop_disjoint_arraycopy", true, true); - - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy", status, 1, true); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy",status, 2, true); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy", status, 4, true); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy", status, 8, true); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (true, "arrayof_oop_disjoint_arraycopy", status, true); + StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_disjoint_arraycopy_id); + StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jshort_disjoint_arraycopy_id); + StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jint_disjoint_arraycopy_id); + StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jlong_disjoint_arraycopy_id); + 
StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id); + + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id); // these need always status in case they are called from generic_arraycopy - StubRoutines::_jbyte_arraycopy = generate_primitive_copy(false, "jbyte_arraycopy", true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy); - StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy); - StubRoutines::_jint_arraycopy = generate_primitive_copy(false, "jint_arraycopy", true, 4, false, StubRoutines::_jint_disjoint_arraycopy); - StubRoutines::_jlong_arraycopy = generate_primitive_copy(false, "jlong_arraycopy", true, 8, false, StubRoutines::_jlong_disjoint_arraycopy); - StubRoutines::_oop_arraycopy = generate_oop_copy (false, "oop_arraycopy", true, false, StubRoutines::_oop_disjoint_arraycopy); - - StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(true, "arrayof_jbyte_arraycopy", status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy); - StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy); + StubRoutines::_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_arraycopy_id, StubRoutines::_jbyte_disjoint_arraycopy); + 
StubRoutines::_jshort_arraycopy = generate_primitive_copy(StubGenStubId::jshort_arraycopy_id, StubRoutines::_jshort_disjoint_arraycopy); + StubRoutines::_jint_arraycopy = generate_primitive_copy(StubGenStubId::jint_arraycopy_id, StubRoutines::_jint_disjoint_arraycopy); + StubRoutines::_jlong_arraycopy = generate_primitive_copy(StubGenStubId::jlong_arraycopy_id, StubRoutines::_jlong_disjoint_arraycopy); + StubRoutines::_oop_arraycopy = generate_oop_copy (StubGenStubId::oop_arraycopy_id, StubRoutines::_oop_disjoint_arraycopy); + + StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, StubRoutines::_arrayof_jbyte_disjoint_arraycopy); + StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_arraycopy_id, StubRoutines::_arrayof_jshort_disjoint_arraycopy); #ifdef _LP64 // since sizeof(jint) < sizeof(HeapWord), there's a different flavor: - StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(true, "arrayof_jint_arraycopy", status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy); + StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_arraycopy_id, StubRoutines::_arrayof_jint_disjoint_arraycopy); #else StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; #endif if (BytesPerHeapOop < HeapWordSize) { - StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (true, "arrayof_oop_arraycopy", status, false, StubRoutines::_arrayof_oop_disjoint_arraycopy); + StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_arraycopy_id, StubRoutines::_arrayof_oop_disjoint_arraycopy); } else { StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; } StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); - StubRoutines::_unsafe_arraycopy = 
generate_unsafe_copy("unsafe_arraycopy"); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(); + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(); + StubRoutines::_generic_arraycopy = generate_generic_copy(); } address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -2960,22 +3105,22 @@ class StubGenerator: public StubCodeGenerator { #undef __ #define __ masm-> - address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) { + address generate_cont_thaw(StubGenStubId stub_id) { if (!Continuations::enabled()) return nullptr; Unimplemented(); return nullptr; } address generate_cont_thaw() { - return generate_cont_thaw("Cont thaw", Continuation::thaw_top); + return generate_cont_thaw(StubGenStubId::cont_thaw_id); } address generate_cont_returnBarrier() { - return generate_cont_thaw("Cont thaw return barrier", Continuation::thaw_return_barrier); + return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); } address generate_cont_returnBarrier_exception() { - return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); + return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); } //--------------------------------------------------------------------------- @@ -3007,8 +3152,8 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); - StubRoutines::_atomic_load_long_entry = generate_atomic_load_long(); - StubRoutines::_atomic_store_long_entry = generate_atomic_store_long(); + 
StubRoutines::Arm::_atomic_load_long_entry = generate_atomic_load_long(); + StubRoutines::Arm::_atomic_store_long_entry = generate_atomic_store_long(); } @@ -3058,27 +3203,36 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } + +// implementation of internal development flag + +#ifdef TEST_C2_GENERIC_ARRAYCOPY +const bool StubGenerator::set_status = true; // generate a status compatible with C1 calls +#else +const bool StubGenerator::set_status = false; // non failing C2 stubs need not return a status in R0 +#endif + diff --git a/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp b/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp index 350636fbe93e1..b663cfd92989d 100644 --- a/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp +++ b/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -119,7 +119,8 @@ void aes_init() { address generate_aescrypt_encryptBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aesencryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -316,7 +317,8 @@ address generate_aescrypt_encryptBlock() { address generate_aescrypt_decryptBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -536,7 +538,8 @@ address generate_cipherBlockChaining_encryptAESCrypt() { // [sp+4] Transposition Box reference __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -601,7 +604,8 @@ address generate_cipherBlockChaining_encryptAESCrypt() { address generate_cipherBlockChaining_decryptAESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); diff --git a/src/hotspot/cpu/arm/stubRoutines_arm.cpp b/src/hotspot/cpu/arm/stubRoutines_arm.cpp index ac98896163f9f..d843d89186ea1 100644 --- a/src/hotspot/cpu/arm/stubRoutines_arm.cpp +++ b/src/hotspot/cpu/arm/stubRoutines_arm.cpp @@ -26,9 +26,13 @@ #include "runtime/frame.inline.hpp" #include "runtime/stubRoutines.hpp" -address StubRoutines::Arm::_idiv_irem_entry = nullptr; +#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr; -address 
StubRoutines::Arm::_partial_subtype_check = nullptr; +#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); -address StubRoutines::_atomic_load_long_entry = nullptr; -address StubRoutines::_atomic_store_long_entry = nullptr; +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) + +#undef DEFINE_ARCH_ENTRY_INIT +#undef DEFINE_ARCH_ENTRY diff --git a/src/hotspot/cpu/arm/stubRoutines_arm.hpp b/src/hotspot/cpu/arm/stubRoutines_arm.hpp index 05c82881cd5f6..838b3e6d3782f 100644 --- a/src/hotspot/cpu/arm/stubRoutines_arm.hpp +++ b/src/hotspot/cpu/arm/stubRoutines_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,38 +29,53 @@ // definition. See stubRoutines.hpp for a description on how to // extend it. 
+// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + enum platform_dependent_constants { - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 9000, - _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 22000, - _final_stubs_code_size = 22000 + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + +public: + static bool returns_to_call_stub(address return_pc) { + return return_pc == _call_stub_return_address; + } + class Arm { friend class StubGenerator; friend class VMStructs; - private: +#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name) ; - static address _idiv_irem_entry; - static address _partial_subtype_check; +#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) - public: +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) - static address idiv_irem_entry() { return _idiv_irem_entry; } - static address partial_subtype_check() { return _partial_subtype_check; } -}; +#undef DECLARE_ARCH_ENTRY_INIT +#undef DECLARE_ARCH_ENTRY - static bool returns_to_call_stub(address return_pc) { - return return_pc == _call_stub_return_address; - } +public: + + // declare getters for arch-specific entries - static address _atomic_load_long_entry; - static address _atomic_store_long_entry; +#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name) ; } - static address atomic_load_long_entry() { return _atomic_load_long_entry; } - static address atomic_store_long_entry() { return _atomic_store_long_entry; } +#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, 
getter_name, init_function) \ + DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) + +#undef DEFINE_ARCH_ENTRY_GETTER_INIT +#undef DEFINE_ARCH_ENTRY_GETTER + +}; #endif // CPU_ARM_STUBROUTINES_ARM_HPP diff --git a/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp b/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp new file mode 100644 index 0000000000000..1a19f1b8cf280 --- /dev/null +++ b/src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_PPC_STUBDECLARATIONS_HPP +#define CPU_PPC_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 20000) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 2000) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 24000) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 24000) \ + + +#endif // CPU_PPC_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index f32e62560721f..795c238d1524a 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2024 SAP SE. All rights reserved. + * Copyright (c) 2012, 2025 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,8 @@ class StubGenerator: public StubCodeGenerator { // Setup a new c frame, copy java arguments, call frame manager or // native_entry, and process result. - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -392,7 +393,8 @@ class StubGenerator: public StubCodeGenerator { // within the VM. 
// address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -447,7 +449,8 @@ class StubGenerator: public StubCodeGenerator { // (LR is unchanged and is live out). // address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward_exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); if (VerifyOops) { @@ -517,93 +520,6 @@ class StubGenerator: public StubCodeGenerator { #undef __ #define __ _masm-> - - // Support for void zero_words_aligned8(HeapWord* to, size_t count) - // - // Arguments: - // to: - // count: - // - // Destroys: - // - address generate_zero_words_aligned8() { - StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8"); - - // Implemented as in ClearArray. - address start = __ function_entry(); - - Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned) - Register cnt_dwords_reg = R4_ARG2; // count (in dwords) - Register tmp1_reg = R5_ARG3; - Register tmp2_reg = R6_ARG4; - Register zero_reg = R7_ARG5; - - // Procedure for large arrays (uses data cache block zero instruction). - Label dwloop, fast, fastloop, restloop, lastdword, done; - int cl_size = VM_Version::L1_data_cache_line_size(); - int cl_dwords = cl_size >> 3; - int cl_dwordaddr_bits = exact_log2(cl_dwords); - int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines. - - // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16. - __ dcbtst(base_ptr_reg); // Indicate write access to first cache line ... - __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if number of dwords is even. - __ srdi_(tmp1_reg, cnt_dwords_reg, 1); // number of double dwords - __ load_const_optimized(zero_reg, 0L); // Use as zero register. 
- - __ cmpdi(CCR1, tmp2_reg, 0); // cnt_dwords even? - __ beq(CCR0, lastdword); // size <= 1 - __ mtctr(tmp1_reg); // Speculatively preload counter for rest loop (>0). - __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included? - __ neg(tmp1_reg, base_ptr_reg); // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000 - - __ blt(CCR0, restloop); // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.) - __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16. - - __ beq(CCR0, fast); // already 128byte aligned - __ mtctr(tmp1_reg); // Set ctr to hit 128byte boundary (00 since size>=256-8) - - // Clear in first cache line dword-by-dword if not already 128byte aligned. - __ bind(dwloop); - __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. - __ addi(base_ptr_reg, base_ptr_reg, 8); - __ bdnz(dwloop); - - // clear 128byte blocks - __ bind(fast); - __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8) - __ andi(tmp2_reg, cnt_dwords_reg, 1); // to check if rest even - - __ mtctr(tmp1_reg); // load counter - __ cmpdi(CCR1, tmp2_reg, 0); // rest even? - __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords - - __ bind(fastloop); - __ dcbz(base_ptr_reg); // Clear 128byte aligned block. - __ addi(base_ptr_reg, base_ptr_reg, cl_size); - __ bdnz(fastloop); - - //__ dcbtst(base_ptr_reg); // Indicate write access to last cache line. - __ beq(CCR0, lastdword); // rest<=1 - __ mtctr(tmp1_reg); // load counter - - // Clear rest. - __ bind(restloop); - __ std(zero_reg, 0, base_ptr_reg); // Clear 8byte aligned block. - __ std(zero_reg, 8, base_ptr_reg); // Clear 8byte aligned block. 
- __ addi(base_ptr_reg, base_ptr_reg, 16); - __ bdnz(restloop); - - __ bind(lastdword); - __ beq(CCR1, done); - __ std(zero_reg, 0, base_ptr_reg); - __ bind(done); - __ blr(); // return - - return start; - } - #if !defined(PRODUCT) // Wrapper which calls oopDesc::is_oop_or_null() // Only called by MacroAssembler::verify_oop @@ -647,8 +563,40 @@ class StubGenerator: public StubCodeGenerator { // value: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_fill(BasicType t, bool aligned, const char* name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_fill(StubGenStubId stub_id) { + BasicType t; + bool aligned; + + switch (stub_id) { + case jbyte_fill_id: + t = T_BYTE; + aligned = false; + break; + case jshort_fill_id: + t = T_SHORT; + aligned = false; + break; + case jint_fill_id: + t = T_INT; + aligned = false; + break; + case arrayof_jbyte_fill_id: + t = T_BYTE; + aligned = true; + break; + case arrayof_jshort_fill_id: + t = T_SHORT; + aligned = true; + break; + case arrayof_jint_fill_id: + t = T_INT; + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); const Register to = R3_ARG1; // source array address @@ -893,8 +841,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_byte_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_disjoint_byte_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + aligned = false; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); @@ -1072,8 +1032,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - 
address generate_conjoint_byte_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_byte_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jbyte_arraycopy_id: + aligned = false; + break; + case arrayof_jbyte_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); @@ -1161,8 +1133,20 @@ class StubGenerator: public StubCodeGenerator { // // 1. check if aligning the backbranch target of loops is beneficial // - address generate_disjoint_short_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_disjoint_short_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jshort_disjoint_arraycopy_id: + aligned = false; + break; + case arrayof_jshort_disjoint_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); Register tmp1 = R6_ARG4; Register tmp2 = R7_ARG5; @@ -1344,8 +1328,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_short_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_short_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jshort_arraycopy_id: + aligned = false; + break; + case arrayof_jshort_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); @@ -1515,8 +1511,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_int_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address 
generate_disjoint_int_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jint_disjoint_arraycopy_id: + aligned = false; + break; + case arrayof_jint_disjoint_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); { @@ -1662,8 +1670,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_int_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_int_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jint_arraycopy_id: + aligned = false; + break; + case arrayof_jint_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? 
@@ -1793,8 +1813,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_long_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_disjoint_long_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jlong_disjoint_arraycopy_id: + aligned = false; + break; + case arrayof_jlong_disjoint_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); { @@ -1919,8 +1951,20 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_long_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_long_copy(StubGenStubId stub_id) { + bool aligned; + switch (stub_id) { + case jlong_arraycopy_id: + aligned = false; + break; + case arrayof_jlong_arraycopy_id: + aligned = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? 
@@ -1948,9 +1992,31 @@ class StubGenerator: public StubCodeGenerator { // count: R5_ARG3 treated as signed // dest_uninitialized: G1 support // - address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_oop_copy(StubGenStubId stub_id) { + bool aligned; + bool dest_uninitialized; + switch (stub_id) { + case oop_arraycopy_id: + aligned = false; + dest_uninitialized = false; + break; + case arrayof_oop_arraycopy_id: + aligned = true; + dest_uninitialized = false; + break; + case oop_arraycopy_uninit_id: + aligned = false; + dest_uninitialized = true; + break; + case arrayof_oop_arraycopy_uninit_id: + aligned = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); address nooverlap_target = aligned ? @@ -1997,8 +2063,31 @@ class StubGenerator: public StubCodeGenerator { // count: R5_ARG3 treated as signed // dest_uninitialized: G1 support // - address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_disjoint_oop_copy(StubGenStubId stub_id) { + bool aligned; + bool dest_uninitialized; + switch (stub_id) { + case oop_disjoint_arraycopy_id: + aligned = false; + dest_uninitialized = false; + break; + case arrayof_oop_disjoint_arraycopy_id: + aligned = true; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_uninit_id: + aligned = false; + dest_uninitialized = true; + break; + case arrayof_oop_disjoint_arraycopy_uninit_id: + aligned = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ function_entry(); assert_positive_int(R5_ARG3); @@ -2066,8 +2155,7 @@ class StubGenerator: public StubCodeGenerator { // ckval: R7 
(super_klass) // ret: R3 zero for success; (-1^K) where K is partial transfer count // - address generate_checkcast_copy(const char *name, bool dest_uninitialized) { - + address generate_checkcast_copy(StubGenStubId stub_id) { const Register R3_from = R3_ARG1; // source array address const Register R4_to = R4_ARG2; // destination array address const Register R5_count = R5_ARG3; // elements count @@ -2081,8 +2169,19 @@ class StubGenerator: public StubCodeGenerator { const Register R12_tmp = R12_scratch2; const Register R2_tmp = R2; + bool dest_uninitialized; + switch (stub_id) { + case checkcast_arraycopy_id: + dest_uninitialized = false; + break; + case checkcast_arraycopy_uninit_id: + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } //__ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ function_entry(); // Assert that int is 64 bit sign extended and arrays are not conjoint. @@ -2201,8 +2300,7 @@ class StubGenerator: public StubCodeGenerator { // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. 
// - address generate_unsafe_copy(const char* name, - address byte_copy_entry, + address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { @@ -2215,7 +2313,8 @@ class StubGenerator: public StubCodeGenerator { const Register R7_tmp = R7_ARG5; //__ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); // Bump this on entry, not on exit: @@ -2298,8 +2397,7 @@ class StubGenerator: public StubCodeGenerator { // R3 == 0 - success // R3 == -1 - need to call System.arraycopy // - address generate_generic_copy(const char *name, - address entry_jbyte_arraycopy, + address generate_generic_copy(address entry_jbyte_arraycopy, address entry_jshort_arraycopy, address entry_jint_arraycopy, address entry_oop_arraycopy, @@ -2322,7 +2420,8 @@ class StubGenerator: public StubCodeGenerator { const Register temp = R2; //__ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); // Bump this on entry, not on exit: @@ -2524,7 +2623,8 @@ class StubGenerator: public StubCodeGenerator { // R5_ARG3 - round key array address generate_aescrypt_encryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -2731,7 +2831,8 @@ class StubGenerator: public StubCodeGenerator { // R5_ARG3 - K (key) in little endian int array address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - StubCodeMark mark(this, "StubRoutines", 
"aescrypt_decryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -2968,9 +3069,20 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_sha256_implCompress(bool multi_block, const char *name) { + address generate_sha256_implCompress(StubGenStubId stub_id) { assert(UseSHA, "need SHA instructions"); - StubCodeMark mark(this, "StubRoutines", name); + bool multi_block; + switch (stub_id) { + case sha256_implCompress_id: + multi_block = false; + break; + case sha256_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); address start = __ function_entry(); __ sha256 (multi_block); @@ -2979,9 +3091,20 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_sha512_implCompress(bool multi_block, const char *name) { + address generate_sha512_implCompress(StubGenStubId stub_id) { assert(UseSHA, "need SHA instructions"); - StubCodeMark mark(this, "StubRoutines", name); + bool multi_block; + switch (stub_id) { + case sha512_implCompress_id: + multi_block = false; + break; + case sha512_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); address start = __ function_entry(); __ sha512 (multi_block); @@ -2992,7 +3115,8 @@ class StubGenerator: public StubCodeGenerator { address generate_data_cache_writeback() { const Register cacheline = R3_ARG1; - StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ cache_wb(Address(cacheline)); @@ -3005,8 +3129,8 @@ class StubGenerator: public StubCodeGenerator { const Register is_presync = R3_ARG1; Register temp = R4; Label SKIP; - - StubCodeMark mark(this, "StubRoutines", 
"_data_cache_writeback_sync"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ andi_(temp, is_presync, 1); @@ -3026,48 +3150,46 @@ class StubGenerator: public StubCodeGenerator { UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); // non-aligned disjoint versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false); - StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubGenStubId::jbyte_disjoint_arraycopy_id); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubGenStubId::jshort_disjoint_arraycopy_id); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubGenStubId::jint_disjoint_arraycopy_id); + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubGenStubId::jlong_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id); // aligned disjoint versions - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, 
"arrayof_jshort_disjoint_arraycopy"); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy"); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy"); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false); - StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "oop_disjoint_arraycopy_uninit", true); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id); // non-aligned conjoint versions - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy", false); - StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true); + StubRoutines::_jbyte_arraycopy = 
generate_conjoint_byte_copy(StubGenStubId::jbyte_arraycopy_id); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(StubGenStubId::jshort_arraycopy_id); + StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(StubGenStubId::jint_arraycopy_id); + StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(StubGenStubId::jlong_arraycopy_id); + StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id); // aligned conjoint versions - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy"); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy"); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy"); - StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false); - StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(StubGenStubId::arrayof_jbyte_arraycopy_id); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(StubGenStubId::arrayof_jshort_arraycopy_id); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(StubGenStubId::arrayof_jint_arraycopy_id); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(StubGenStubId::arrayof_jlong_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id); // special/generic versions - StubRoutines::_checkcast_arraycopy = 
generate_checkcast_copy("checkcast_arraycopy", false); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id); - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - STUB_ENTRY(jbyte_arraycopy()), + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(STUB_ENTRY(jbyte_arraycopy()), STUB_ENTRY(jshort_arraycopy()), STUB_ENTRY(jint_arraycopy()), STUB_ENTRY(jlong_arraycopy())); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", - STUB_ENTRY(jbyte_arraycopy()), + StubRoutines::_generic_arraycopy = generate_generic_copy(STUB_ENTRY(jbyte_arraycopy()), STUB_ENTRY(jshort_arraycopy()), STUB_ENTRY(jint_arraycopy()), STUB_ENTRY(oop_arraycopy()), @@ -3078,12 +3200,12 @@ class StubGenerator: public StubCodeGenerator { // fill routines #ifdef COMPILER2 if (OptimizeFill) { - StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); - StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); - StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); - StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); - StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); - StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); + 
StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); } #endif } @@ -3101,7 +3223,8 @@ class StubGenerator: public StubCodeGenerator { // address generate_multiplyToLen() { - StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3177,7 +3300,8 @@ class StubGenerator: public StubCodeGenerator { */ address generate_mulAdd() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "mulAdd"); + StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3207,7 +3331,8 @@ class StubGenerator: public StubCodeGenerator { */ address generate_squareToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "squareToLen"); + StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3440,9 +3565,20 @@ class StubGenerator: public StubCodeGenerator { * R3_RET - int crc result */ // Compute CRC32 function. - address generate_CRC32_updateBytes(bool is_crc32c) { + address generate_CRC32_updateBytes(StubGenStubId stub_id) { + bool is_crc32c; + switch (stub_id) { + case updateBytesCRC32_id: + is_crc32c = false; + break; + case updateBytesCRC32C_id: + is_crc32c = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", is_crc32c ? "CRC32C_updateBytes" : "CRC32_updateBytes"); + StubCodeMark mark(this, stub_id); address start = __ function_entry(); // Remember stub start address (is rtn value). 
__ crc32(R3_ARG1, R4_ARG2, R5_ARG3, R2, R6, R7, R8, R9, R10, R11, R12, is_crc32c); __ blr(); @@ -3469,7 +3605,8 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); address stub_address = __ pc(); @@ -3566,7 +3703,8 @@ class StubGenerator: public StubCodeGenerator { // Base64 decodeBlock intrinsic address generate_base64_decodeBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "base64_decodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); typedef struct { @@ -4154,7 +4292,8 @@ class StubGenerator: public StubCodeGenerator { address generate_base64_encodeBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "base64_encodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ function_entry(); typedef struct { @@ -4443,10 +4582,10 @@ class StubGenerator: public StubCodeGenerator { #endif // VM_LITTLE_ENDIAN -address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); +void generate_lookup_secondary_supers_table_stub() { + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubCodeMark mark(this, stub_id); - address start = __ pc(); const Register r_super_klass = R4_ARG2, r_array_base = R3_ARG1, @@ -4456,17 +4595,19 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { r_bitmap = R11_scratch1, result = R8_ARG6; - __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, - r_array_base, r_array_length, r_array_index, - r_bitmap, result, super_klass_index); - __ 
blr(); - - return start; + for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, + r_array_base, r_array_length, r_array_index, + r_bitmap, result, slot); + __ blr(); + } } // Slow path implementation for UseSecondarySupersTable. address generate_lookup_secondary_supers_table_slow_path_stub() { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path"); + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register @@ -4483,13 +4624,33 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { return start; } - address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) { + address generate_cont_thaw(StubGenStubId stub_id) { if (!Continuations::enabled()) return nullptr; - bool return_barrier = Continuation::is_thaw_return_barrier(kind); - bool return_barrier_exception = Continuation::is_thaw_return_barrier_exception(kind); + Continuation::thaw_kind kind; + bool return_barrier; + bool return_barrier_exception; - StubCodeMark mark(this, "StubRoutines", label); + switch (stub_id) { + case cont_thaw_id: + kind = Continuation::thaw_top; + return_barrier = false; + return_barrier_exception = false; + break; + case cont_returnBarrier_id: + kind = Continuation::thaw_return_barrier; + return_barrier = true; + return_barrier_exception = false; + break; + case cont_returnBarrierExc_id: + kind = Continuation::thaw_return_barrier_exception; + return_barrier = true; + return_barrier_exception = true; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); Register tmp1 = R10_ARG8; Register tmp2 = R9_ARG7; @@ -4578,22 +4739,23 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { } address generate_cont_thaw() { - 
return generate_cont_thaw("Cont thaw", Continuation::thaw_top); + return generate_cont_thaw(StubGenStubId::cont_thaw_id); } // TODO: will probably need multiple return barriers depending on return type address generate_cont_returnBarrier() { - return generate_cont_thaw("Cont thaw return barrier", Continuation::thaw_return_barrier); + return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); } address generate_cont_returnBarrier_exception() { - return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); + return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); } address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines","Continuation preempt stub"); + StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ clobber_nonvolatile_registers(); // Except R16_thread and R29_TOC @@ -4628,7 +4790,8 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Native caller has no idea how to handle exceptions, @@ -4646,7 +4809,8 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { // R19_method = result Method* address generate_upcall_stub_load_target() { - StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ resolve_global_jobject(R3_ARG1, R22_tmp2, R23_tmp3, MacroAssembler::PRESERVATION_FRAME_LR_GP_FP_REGS); @@ -4686,13 +4850,13 @@ address generate_lookup_secondary_supers_table_stub(u1 
super_klass_index) { // CRC32 Intrinsics. if (UseCRC32Intrinsics) { StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY); - StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(false); + StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(StubGenStubId::updateBytesCRC32_id); } // CRC32C Intrinsics. if (UseCRC32CIntrinsics) { StubRoutines::_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY); - StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(true); + StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(StubGenStubId::updateBytesCRC32C_id); } if (VM_Version::supports_float16()) { @@ -4728,10 +4892,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { if (UseSecondarySupersTable) { StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); if (!InlineSecondarySupersTest) { - for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] - = generate_lookup_secondary_supers_table_stub(slot); - } + generate_lookup_secondary_supers_table_stub(); } } @@ -4774,12 +4935,12 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { } if (UseSHA256Intrinsics) { - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { - StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); - StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, 
"sha512_implCompressMB"); + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); } #ifdef VM_LITTLE_ENDIAN @@ -4793,27 +4954,28 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } }; -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } + diff --git a/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp b/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp index 4db0227e81b4e..a542d7947f85f 100644 --- a/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp +++ b/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -32,14 +32,17 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + enum platform_dependent_constants { - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 20000, - _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 24000, - _final_stubs_code_size = 24000 + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + // CRC32 Intrinsics. #define CRC32_TABLE_SIZE (4 * 256) #define REVERSE_CRC32_POLY 0xEDB88320 diff --git a/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp b/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp new file mode 100644 index 0000000000000..4905566c233a3 --- /dev/null +++ b/src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_RISCV_STUBDECLARATIONS_HPP +#define CPU_RISCV_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 10000) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 2000) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 45000) \ + do_stub(compiler, compare_long_string_LL) \ + do_arch_entry(riscv, compiler, compare_long_string_LL, \ + compare_long_string_LL, compare_long_string_LL) \ + do_stub(compiler, compare_long_string_UU) \ + do_arch_entry(riscv, compiler, compare_long_string_UU, \ + compare_long_string_UU, compare_long_string_UU) \ + do_stub(compiler, compare_long_string_LU) \ + do_arch_entry(riscv, compiler, compare_long_string_LU, \ + compare_long_string_LU, compare_long_string_LU) \ + do_stub(compiler, compare_long_string_UL) \ + do_arch_entry(riscv, compiler, compare_long_string_UL, \ + compare_long_string_UL, compare_long_string_UL) \ + do_stub(compiler, string_indexof_linear_ll) \ + do_arch_entry(riscv, compiler, string_indexof_linear_ll, \ + string_indexof_linear_ll, string_indexof_linear_ll) \ + do_stub(compiler, string_indexof_linear_uu) \ + do_arch_entry(riscv, compiler, string_indexof_linear_uu, \ + string_indexof_linear_uu, string_indexof_linear_uu) \ + do_stub(compiler, string_indexof_linear_ul) \ + do_arch_entry(riscv, compiler, string_indexof_linear_ul, \ + string_indexof_linear_ul, string_indexof_linear_ul) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 20000 ZGC_ONLY(+10000)) \ + 
do_stub(final, copy_byte_f) \ + do_arch_entry(riscv, final, copy_byte_f, copy_byte_f, \ + copy_byte_f) \ + do_stub(final, copy_byte_b) \ + do_arch_entry(riscv, final, copy_byte_b, copy_byte_b, \ + copy_byte_b) \ + do_stub(final, zero_blocks) \ + do_arch_entry(riscv, final, zero_blocks, zero_blocks, \ + zero_blocks) \ + + +#endif // CPU_RISCV_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index afd1f691f34fa..d9880b1f85bad 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -207,7 +207,8 @@ class StubGenerator: public StubCodeGenerator { (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Address sp_after_call (fp, sp_after_call_off * wordSize); @@ -475,7 +476,8 @@ class StubGenerator: public StubCodeGenerator { // x10: exception oop address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // same as in generate_call_stub(): @@ -527,7 +529,8 @@ class StubGenerator: public StubCodeGenerator { // so it just needs to be generated code with no x86 prolog address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward exception"); + StubGenStubId stub_id = 
StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Upon entry, RA points to the return address returning into @@ -613,7 +616,8 @@ class StubGenerator: public StubCodeGenerator { // [tos + 5]: saved t0 address generate_verify_oop() { - StubCodeMark mark(this, "StubRoutines", "verify_oop"); + StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label exit, error; @@ -674,7 +678,8 @@ class StubGenerator: public StubCodeGenerator { const Register base = x28, cnt = x29, tmp1 = x30, tmp2 = x31; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "zero_blocks"); + StubGenStubId stub_id = StubGenStubId::zero_blocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); if (UseBlockZeroing) { @@ -726,8 +731,22 @@ class StubGenerator: public StubCodeGenerator { // // s and d are adjusted to point to the remaining words to copy // - void generate_copy_longs(Label &start, Register s, Register d, Register count, - copy_direction direction) { + void generate_copy_longs(StubGenStubId stub_id, Label &start, + Register s, Register d, Register count) { + BasicType type; + copy_direction direction; + switch (stub_id) { + case copy_byte_f_id: + direction = copy_forwards; + type = T_BYTE; + break; + case copy_byte_b_id: + direction = copy_backwards; + type = T_BYTE; + break; + default: + ShouldNotReachHere(); + } int unit = wordSize * direction; int bias = wordSize; @@ -741,13 +760,7 @@ class StubGenerator: public StubCodeGenerator { assert_different_registers(s, d, count, t0); Label again, drain; - const char* stub_name = nullptr; - if (direction == copy_forwards) { - stub_name = "forward_copy_longs"; - } else { - stub_name = "backward_copy_longs"; - } - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); __ align(CodeEntryAlignment); __ bind(start); @@ -1082,10 +1095,11 @@ class StubGenerator: public 
StubCodeGenerator { } // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // is_oop - true => oop array, so generate store check code - // name - stub name string + // stub_id - is used to name the stub and identify all details of + // how to perform the copy. + // + // entry - is assigned to the stub's post push entry point unless + // it is null // // Inputs: // c_rarg0 - source array address @@ -1096,16 +1110,96 @@ class StubGenerator: public StubCodeGenerator { // the hardware handle it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomically. // - // Side Effects: - // disjoint_int_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_int_oop_copy(). + // Side Effects: entry is set to the (post push) entry point so it + // can be used by the corresponding conjoint copy + // method // - address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address* entry, - const char* name, bool dest_uninitialized = false) { + address generate_disjoint_copy(StubGenStubId stub_id, address* entry) { + size_t size; + bool aligned; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + size = sizeof(jbyte); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + size = sizeof(jbyte); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_disjoint_arraycopy_id: + size = sizeof(jshort); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jshort_disjoint_arraycopy_id: + size = sizeof(jshort); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jint_disjoint_arraycopy_id: + size = sizeof(jint); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jint_disjoint_arraycopy_id: + size = 
sizeof(jint); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_disjoint_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case arrayof_jlong_disjoint_arraycopy + ShouldNotReachHere(); + break; + case arrayof_jlong_disjoint_arraycopy_id: + size = sizeof(jlong); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case arrayof_oop_disjoint_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + case arrayof_oop_disjoint_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + break; + } + const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_reg = RegSet::of(s, d, count); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -1154,10 +1248,15 @@ class StubGenerator: public StubCodeGenerator { } // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // is_oop - true => oop array, so generate store check code - // name - stub name string + // stub_id - is used to name the stub and identify all details of + // how to perform the copy. 
+ // + // nooverlap_target - identifes the (post push) entry for the + // corresponding disjoint copy routine which can be + // jumped to if the ranges do not actually overlap + // + // entry - is assigned to the stub's post push entry point unless + // it is null // // Inputs: // c_rarg0 - source array address @@ -1168,12 +1267,94 @@ class StubGenerator: public StubCodeGenerator { // the hardware handle it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomically. // - address generate_conjoint_copy(size_t size, bool aligned, bool is_oop, address nooverlap_target, - address* entry, const char* name, - bool dest_uninitialized = false) { + // Side Effects: + // entry is set to the no-overlap entry point so it can be used by + // some other conjoint copy method + // + address generate_conjoint_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_regs = RegSet::of(s, d, count); - StubCodeMark mark(this, "StubRoutines", name); + int size; + bool aligned; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case jbyte_arraycopy_id: + size = sizeof(jbyte); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jbyte_arraycopy_id: + size = sizeof(jbyte); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_arraycopy_id: + size = sizeof(jshort); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jshort_arraycopy_id: + size = sizeof(jshort); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jint_arraycopy_id: + size = sizeof(jint); + aligned = false; + is_oop = false; + dest_uninitialized = false; + break; + case arrayof_jint_arraycopy_id: + size = sizeof(jint); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_arraycopy_id: + // since this is 
always aligned we can (should!) use the same + // stub as for case arrayof_jlong_disjoint_arraycopy + ShouldNotReachHere(); + break; + case arrayof_jlong_arraycopy_id: + size = sizeof(jlong); + aligned = true; + is_oop = false; + dest_uninitialized = false; + break; + case oop_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case arrayof_oop_arraycopy_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = false; + break; + case oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + case arrayof_oop_arraycopy_uninit_id: + size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); + aligned = !UseCompressedOops; + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -1227,227 +1408,6 @@ class StubGenerator: public StubCodeGenerator { return start; } - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. 
The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_byte_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_byte_copy(). - // - address generate_disjoint_byte_copy(bool aligned, address* entry, const char* name) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jbyte), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, - // we let the hardware handle it. The one to eight bytes within words, - // dwords or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, - address* entry, const char* name) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jbyte), aligned, not_oop, nooverlap_target, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we - // let the hardware handle it. The two or four words within dwords - // or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - // Side Effects: - // disjoint_short_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_short_copy(). 
- // - address generate_disjoint_short_copy(bool aligned, - address* entry, const char* name) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jshort), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we - // let the hardware handle it. The two or four words within dwords - // or qwords that span cache line boundaries will still be loaded - // and stored atomically. - // - address generate_conjoint_short_copy(bool aligned, address nooverlap_target, - address* entry, const char* name) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jshort), aligned, not_oop, nooverlap_target, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let - // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomically. - // - // Side Effects: - // disjoint_int_copy_entry is set to the no-overlap entry point - // used by generate_conjoint_int_oop_copy(). 
- // - address generate_disjoint_int_copy(bool aligned, address* entry, - const char* name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as ssize_t, can be zero - // - // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let - // the hardware handle it. The two dwords within qwords that span - // cache line boundaries will still be loaded and stored atomically. - // - address generate_conjoint_int_copy(bool aligned, address nooverlap_target, - address* entry, const char* name, - bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jint), aligned, not_oop, nooverlap_target, entry, name); - } - - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - // Side Effects: - // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the - // no-overlap entry point used by generate_conjoint_long_oop_copy(). 
- // - address generate_disjoint_long_copy(bool aligned, address* entry, - const char* name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_disjoint_copy(sizeof (jlong), aligned, not_oop, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - address generate_conjoint_long_copy(bool aligned, - address nooverlap_target, address* entry, - const char* name, bool dest_uninitialized = false) { - const bool not_oop = false; - return generate_conjoint_copy(sizeof (jlong), aligned, not_oop, nooverlap_target, entry, name); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - // Side Effects: - // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the - // no-overlap entry point used by generate_conjoint_long_oop_copy(). - // - address generate_disjoint_oop_copy(bool aligned, address* entry, - const char* name, bool dest_uninitialized) { - const bool is_oop = true; - const size_t size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); - return generate_disjoint_copy(size, aligned, is_oop, entry, name, dest_uninitialized); - } - - // Arguments: - // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes - // ignored - // name - stub name string - // - // Inputs: - // c_rarg0 - source array address - // c_rarg1 - destination array address - // c_rarg2 - element count, treated as size_t, can be zero - // - address generate_conjoint_oop_copy(bool aligned, - address nooverlap_target, address* entry, - const char* name, bool dest_uninitialized) { - const bool is_oop = true; - const size_t size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); - return generate_conjoint_copy(size, aligned, is_oop, nooverlap_target, entry, - name, dest_uninitialized); - } - // Helper for generating a dynamic type check. // Smashes t0, t1. void generate_type_check(Register sub_klass, @@ -1484,8 +1444,19 @@ class StubGenerator: public StubCodeGenerator { // x10 == 0 - success // x10 == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(const char* name, address* entry, - bool dest_uninitialized = false) { + address generate_checkcast_copy(StubGenStubId stub_id, address* entry) { + bool dest_uninitialized; + switch (stub_id) { + case checkcast_arraycopy_id: + dest_uninitialized = false; + break; + case checkcast_arraycopy_uninit_id: + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + Label L_load_element, L_store_element, L_do_card_marks, L_done, L_done_pop; // Input registers (after setup_arg_regs) @@ -1518,7 +1489,7 @@ class StubGenerator: public StubCodeGenerator { copied_oop, r9_klass, count_save); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -1675,8 +1646,7 @@ class StubGenerator: public StubCodeGenerator { // Examines the alignment of 
the operands and dispatches // to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char* name, - address byte_copy_entry, + address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { @@ -1686,7 +1656,8 @@ class StubGenerator: public StubCodeGenerator { const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -1731,8 +1702,7 @@ class StubGenerator: public StubCodeGenerator { // x10 == 0 - success // x10 == -1^K - failure, where K is partial transfer count // - address generate_generic_copy(const char* name, - address byte_copy_entry, address short_copy_entry, + address generate_generic_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { assert_cond(byte_copy_entry != nullptr && short_copy_entry != nullptr && @@ -1753,7 +1723,8 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2009,9 +1980,41 @@ class StubGenerator: public StubCodeGenerator { // value: c_rarg1 // count: c_rarg2 treated as signed // - address generate_fill(BasicType t, bool aligned, const char* name) { + address generate_fill(StubGenStubId stub_id) { + BasicType t; + bool aligned; + + switch (stub_id) { + case jbyte_fill_id: + t = T_BYTE; + aligned = false; + break; + case jshort_fill_id: + t = T_SHORT; + aligned = false; + break; + case jint_fill_id: + t = T_INT; + aligned = false; + break; + case arrayof_jbyte_fill_id: 
+ t = T_BYTE; + aligned = true; + break; + case arrayof_jshort_fill_id: + t = T_SHORT; + aligned = true; + break; + case arrayof_jint_fill_id: + t = T_INT; + aligned = true; + break; + default: + ShouldNotReachHere(); + }; + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -2183,109 +2186,79 @@ class StubGenerator: public StubCodeGenerator { address entry_jlong_arraycopy = nullptr; address entry_checkcast_arraycopy = nullptr; - generate_copy_longs(copy_f, c_rarg0, c_rarg1, t1, copy_forwards); - generate_copy_longs(copy_b, c_rarg0, c_rarg1, t1, copy_backwards); + generate_copy_longs(StubGenStubId::copy_byte_f_id, copy_f, c_rarg0, c_rarg1, t1); + generate_copy_longs(StubGenStubId::copy_byte_b_id, copy_b, c_rarg0, c_rarg1, t1); StubRoutines::riscv::_zero_blocks = generate_zero_blocks(); //*** jbyte // Always need aligned and unaligned versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, - "jbyte_disjoint_arraycopy"); - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, - &entry_jbyte_arraycopy, - "jbyte_arraycopy"); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, - "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, nullptr, - "arrayof_jbyte_arraycopy"); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, entry, nullptr); //*** jshort // 
Always need aligned and unaligned versions - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, - "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, - &entry_jshort_arraycopy, - "jshort_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, - "arrayof_jshort_disjoint_arraycopy"); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, nullptr, - "arrayof_jshort_arraycopy"); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::jshort_arraycopy_id, entry, &entry_jshort_arraycopy); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jshort_arraycopy_id, entry, nullptr); //*** jint // Aligned versions - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, - "arrayof_jint_disjoint_arraycopy"); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, - "arrayof_jint_arraycopy"); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 
// entry_jint_arraycopy always points to the unaligned version - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, - "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, - &entry_jint_arraycopy, - "jint_arraycopy"); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); + StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); //*** jlong // It is always aligned - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, - "arrayof_jlong_disjoint_arraycopy"); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, - "arrayof_jlong_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; - StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); + StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; + StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; //*** oops - { - // With compressed oops we need unaligned versions; notice that - // we overwrite entry_oop_arraycopy. 
- bool aligned = !UseCompressedOops; - - StubRoutines::_arrayof_oop_disjoint_arraycopy - = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy", - /*dest_uninitialized*/false); - StubRoutines::_arrayof_oop_arraycopy - = generate_conjoint_oop_copy(aligned, entry, &entry_oop_arraycopy, "arrayof_oop_arraycopy", - /*dest_uninitialized*/false); - // Aligned versions without pre-barriers - StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit - = generate_disjoint_oop_copy(aligned, &entry, "arrayof_oop_disjoint_arraycopy_uninit", - /*dest_uninitialized*/true); - StubRoutines::_arrayof_oop_arraycopy_uninit - = generate_conjoint_oop_copy(aligned, entry, nullptr, "arrayof_oop_arraycopy_uninit", - /*dest_uninitialized*/true); - } + StubRoutines::_arrayof_oop_disjoint_arraycopy + = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_oop_arraycopy + = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); + // Aligned versions without pre-barriers + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit + = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id, &entry); + StubRoutines::_arrayof_oop_arraycopy_uninit + = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id, entry, nullptr); StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, - /*dest_uninitialized*/true); + StubRoutines::_checkcast_arraycopy = 
generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_generic_arraycopy = generate_generic_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_oop_arraycopy, entry_jlong_arraycopy, entry_checkcast_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); - StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); - StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); - StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); - StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); - StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); } void generate_aes_loadkeys(const Register &key, VectorRegister *working_vregs, int rounds) { @@ -2321,7 +2294,8 @@ class StubGenerator: public StubCodeGenerator { assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) 
support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_aes128, L_aes192; @@ -2399,7 +2373,8 @@ class StubGenerator: public StubCodeGenerator { assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_aes128, L_aes192; @@ -2499,9 +2474,20 @@ class StubGenerator: public StubCodeGenerator { // x28 = tmp1 // x29 = tmp2 // x30 = tmp3 - address generate_compare_long_string_different_encoding(bool isLU) { + address generate_compare_long_string_different_encoding(StubGenStubId stub_id) { + bool isLU; + switch (stub_id) { + case compare_long_string_LU_id: + isLU = true; + break; + case compare_long_string_UL_id: + isLU = false; + break; + default: + ShouldNotReachHere(); + }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", isLU ? 
"compare_long_string_different_encoding LU" : "compare_long_string_different_encoding UL"); + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label SMALL_LOOP, TAIL, LOAD_LAST, DONE, CALCULATE_DIFFERENCE; const Register result = x10, str1 = x11, str2 = x13, cnt2 = x14, @@ -2595,7 +2581,8 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -2665,10 +2652,20 @@ class StubGenerator: public StubCodeGenerator { // x29 = tmp2 // x30 = tmp3 // x31 = tmp4 - address generate_compare_long_string_same_encoding(bool isLL) { + address generate_compare_long_string_same_encoding(StubGenStubId stub_id) { + bool isLL; + switch (stub_id) { + case compare_long_string_LL_id: + isLL = true; + break; + case compare_long_string_UU_id: + isLL = false; + break; + default: + ShouldNotReachHere(); + }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", isLL ? 
- "compare_long_string_same_encoding LL" : "compare_long_string_same_encoding UU"); + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label SMALL_LOOP, CHECK_LAST, DIFF2, TAIL, LENGTH_DIFF, DIFF, LAST_CHECK_AND_LENGTH_DIFF; @@ -2754,10 +2751,10 @@ class StubGenerator: public StubCodeGenerator { } void generate_compare_long_strings() { - StubRoutines::riscv::_compare_long_string_LL = generate_compare_long_string_same_encoding(true); - StubRoutines::riscv::_compare_long_string_UU = generate_compare_long_string_same_encoding(false); - StubRoutines::riscv::_compare_long_string_LU = generate_compare_long_string_different_encoding(true); - StubRoutines::riscv::_compare_long_string_UL = generate_compare_long_string_different_encoding(false); + StubRoutines::riscv::_compare_long_string_LL = generate_compare_long_string_same_encoding(StubGenStubId::compare_long_string_LL_id); + StubRoutines::riscv::_compare_long_string_UU = generate_compare_long_string_same_encoding(StubGenStubId::compare_long_string_UU_id); + StubRoutines::riscv::_compare_long_string_LU = generate_compare_long_string_different_encoding(StubGenStubId::compare_long_string_LU_id); + StubRoutines::riscv::_compare_long_string_UL = generate_compare_long_string_different_encoding(StubGenStubId::compare_long_string_UL_id); } // x10 result @@ -2765,13 +2762,29 @@ class StubGenerator: public StubCodeGenerator { // x12 src count // x13 pattern // x14 pattern count - address generate_string_indexof_linear(bool needle_isL, bool haystack_isL) + address generate_string_indexof_linear(StubGenStubId stub_id) { - const char* stubName = needle_isL - ? (haystack_isL ? 
"indexof_linear_ll" : "indexof_linear_ul") - : "indexof_linear_uu"; + bool needle_isL; + bool haystack_isL; + switch (stub_id) { + case string_indexof_linear_ll_id: + needle_isL = true; + haystack_isL = true; + break; + case string_indexof_linear_ul_id: + needle_isL = true; + haystack_isL = false; + break; + case string_indexof_linear_uu_id: + needle_isL = false; + haystack_isL = false; + break; + default: + ShouldNotReachHere(); + }; + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stubName); + StubCodeMark mark(this, stub_id); address entry = __ pc(); int needle_chr_size = needle_isL ? 1 : 2; @@ -2998,16 +3011,16 @@ class StubGenerator: public StubCodeGenerator { void generate_string_indexof_stubs() { - StubRoutines::riscv::_string_indexof_linear_ll = generate_string_indexof_linear(true, true); - StubRoutines::riscv::_string_indexof_linear_uu = generate_string_indexof_linear(false, false); - StubRoutines::riscv::_string_indexof_linear_ul = generate_string_indexof_linear(true, false); + StubRoutines::riscv::_string_indexof_linear_ll = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_ll_id); + StubRoutines::riscv::_string_indexof_linear_uu = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_uu_id); + StubRoutines::riscv::_string_indexof_linear_ul = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_ul_id); } #ifdef COMPILER2 - address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); + void generate_lookup_secondary_supers_table_stub() { + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubCodeMark mark(this, stub_id); - address start = __ pc(); const Register r_super_klass = x10, r_array_base = x11, @@ -3017,20 +3030,22 @@ class StubGenerator: public StubCodeGenerator { result = x15, r_bitmap = x16; - Label L_success; - __ enter(); - __ 
lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, result, - r_array_base, r_array_length, r_array_index, - r_bitmap, super_klass_index, /*stub_is_near*/ true); - __ leave(); - __ ret(); - - return start; + for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + Label L_success; + __ enter(); + __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, result, + r_array_base, r_array_length, r_array_index, + r_bitmap, slot, /*stub_is_near*/true); + __ leave(); + __ ret(); + } } // Slow path implementation for UseSecondarySupersTable. address generate_lookup_secondary_supers_table_slow_path_stub() { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path"); + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register @@ -3051,7 +3066,8 @@ class StubGenerator: public StubCodeGenerator { address generate_mulAdd() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "mulAdd"); + StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -3084,7 +3100,8 @@ class StubGenerator: public StubCodeGenerator { address generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); const Register x = x10; @@ -3114,7 +3131,8 @@ class StubGenerator: public StubCodeGenerator { address generate_squareToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "squareToLen"); + StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); const Register x = x10; @@ -3154,7 +3172,8 @@ class StubGenerator: public StubCodeGenerator { 
// address generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label loop, exit; @@ -3205,7 +3224,8 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubCodeMark mark(this, stub_id); address entry = __ pc(); Label loop, exit; @@ -4031,7 +4051,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_thaw() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines", "Cont thaw"); + StubGenStubId stub_id = StubGenStubId::cont_thaw_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_top); return start; @@ -4041,7 +4062,8 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; // TODO: will probably need multiple return barriers depending on return type - StubCodeMark mark(this, "StubRoutines", "cont return barrier"); + StubGenStubId stub_id = StubGenStubId::cont_returnBarrier_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier); @@ -4052,7 +4074,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier_exception() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines", "cont return barrier exception handler"); + StubGenStubId stub_id = StubGenStubId::cont_returnBarrierExc_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_return_barrier_exception); @@ -4062,7 +4085,8 @@ class StubGenerator: public StubCodeGenerator { 
address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines","Continuation preempt stub"); + StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ reset_last_Java_frame(true); @@ -4099,11 +4123,11 @@ class StubGenerator: public StubCodeGenerator { StubCodeGenerator* _cgen; public: Sha2Generator(MacroAssembler* masm, StubCodeGenerator* cgen) : MacroAssembler(masm->code()), _cgen(cgen) {} - address generate_sha256_implCompress(bool multi_block) { - return generate_sha2_implCompress(Assembler::e32, multi_block); + address generate_sha256_implCompress(StubGenStubId stub_id) { + return generate_sha2_implCompress(Assembler::e32, stub_id); } - address generate_sha512_implCompress(bool multi_block) { - return generate_sha2_implCompress(Assembler::e64, multi_block); + address generate_sha512_implCompress(StubGenStubId stub_id) { + return generate_sha2_implCompress(Assembler::e64, stub_id); } private: @@ -4228,15 +4252,6 @@ class StubGenerator: public StubCodeGenerator { } } - const char* stub_name(Assembler::SEW vset_sew, bool multi_block) { - if (vset_sew == Assembler::e32 && !multi_block) return "sha256_implCompress"; - if (vset_sew == Assembler::e32 && multi_block) return "sha256_implCompressMB"; - if (vset_sew == Assembler::e64 && !multi_block) return "sha512_implCompress"; - if (vset_sew == Assembler::e64 && multi_block) return "sha512_implCompressMB"; - ShouldNotReachHere(); - return "bad name lookup"; - } - // Arguments: // // Inputs: @@ -4245,7 +4260,7 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha2_implCompress(Assembler::SEW vset_sew, bool multi_block) { + address generate_sha2_implCompress(Assembler::SEW vset_sew, StubGenStubId stub_id) { alignas(64) static const uint32_t round_consts_256[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 
0x59f111f1, 0x923f82a4, 0xab1c5ed5, @@ -4295,8 +4310,29 @@ class StubGenerator: public StubCodeGenerator { }; const int const_add = vset_sew == Assembler::e32 ? 16 : 32; + bool multi_block; + switch (stub_id) { + case sha256_implCompress_id: + assert (vset_sew == Assembler::e32, "wrong macroassembler for stub"); + multi_block = false; + break; + case sha256_implCompressMB_id: + assert (vset_sew == Assembler::e32, "wrong macroassembler for stub"); + multi_block = true; + break; + case sha512_implCompress_id: + assert (vset_sew == Assembler::e64, "wrong macroassembler for stub"); + multi_block = false; + break; + case sha512_implCompressMB_id: + assert (vset_sew == Assembler::e64, "wrong macroassembler for stub"); + multi_block = true; + break; + default: + ShouldNotReachHere(); + }; __ align(CodeEntryAlignment); - StubCodeMark mark(_cgen, "StubRoutines", stub_name(vset_sew, multi_block)); + StubCodeMark mark(_cgen, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -4643,9 +4679,20 @@ class StubGenerator: public StubCodeGenerator { // x29 t4 buf5 // x30 t5 buf6 // x31 t6 buf7 - address generate_md5_implCompress(bool multi_block, const char *name) { + address generate_md5_implCompress(StubGenStubId stub_id) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + bool multi_block; + switch (stub_id) { + case md5_implCompress_id: + multi_block = false; + break; + case md5_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + }; + StubCodeMark mark(this, stub_id); address start = __ pc(); // rotation constants @@ -4899,7 +4946,8 @@ class StubGenerator: public StubCodeGenerator { Label L_Rounds; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "chacha20Block"); + StubGenStubId stub_id = StubGenStubId::chacha20Block_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5198,9 +5246,20 @@ class StubGenerator: public StubCodeGenerator { // - - - - - - 
below are only for implCompressMultiBlock0 - - - - - - // c_rarg0: int offset, when (multi_block == true) // - address generate_sha1_implCompress(bool multi_block, const char *name) { + address generate_sha1_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha1_implCompress_id: + multi_block = false; + break; + case sha1_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5410,7 +5469,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "encodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5666,7 +5726,8 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "decodeBlock"); + StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5910,7 +5971,8 @@ class StubGenerator: public StubCodeGenerator { */ address generate_updateBytesAdler32() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32"); + StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_nmax, L_nmax_loop, L_nmax_loop_entry, L_by16, L_by16_loop, @@ -6168,7 +6230,8 @@ static const int64_t right_3_bits = right_n_bits(3); address generate_poly1305_processBlocks() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "poly1305_processBlocks"); + StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); Label here; @@ -6357,7 +6420,8 @@ static 
const int64_t right_3_bits = right_n_bits(3); assert(UseCRC32Intrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -6381,7 +6445,8 @@ static const int64_t right_3_bits = right_n_bits(3); // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Native caller has no idea how to handle exceptions, @@ -6398,7 +6463,8 @@ static const int64_t right_3_bits = right_n_bits(3); // xmethod = Method* result address generate_upcall_stub_load_target() { - StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ resolve_global_jobject(j_rarg0, t0, t1); @@ -6473,10 +6539,7 @@ static const int64_t right_3_bits = right_n_bits(3); if (UseSecondarySupersTable) { StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); if (!InlineSecondarySupersTest) { - for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] - = generate_lookup_secondary_supers_table_stub(slot); - } + generate_lookup_secondary_supers_table_stub(); } } #endif // COMPILER2 @@ -6502,13 +6565,15 @@ static const int64_t right_3_bits = right_n_bits(3); } if (UseMontgomeryMultiplyIntrinsic) { - StubCodeMark mark(this, "StubRoutines", "montgomeryMultiply"); + StubGenStubId stub_id = StubGenStubId::montgomeryMultiply_id; + StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, 
/*squaring*/false); StubRoutines::_montgomeryMultiply = g.generate_multiply(); } if (UseMontgomerySquareIntrinsic) { - StubCodeMark mark(this, "StubRoutines", "montgomerySquare"); + StubGenStubId stub_id = StubGenStubId::montgomerySquare_id; + StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); StubRoutines::_montgomerySquare = g.generate_square(); } @@ -6529,19 +6594,19 @@ static const int64_t right_3_bits = right_n_bits(3); if (UseSHA256Intrinsics) { Sha2Generator sha2(_masm, this); - StubRoutines::_sha256_implCompress = sha2.generate_sha256_implCompress(false); - StubRoutines::_sha256_implCompressMB = sha2.generate_sha256_implCompress(true); + StubRoutines::_sha256_implCompress = sha2.generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = sha2.generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { Sha2Generator sha2(_masm, this); - StubRoutines::_sha512_implCompress = sha2.generate_sha512_implCompress(false); - StubRoutines::_sha512_implCompressMB = sha2.generate_sha512_implCompress(true); + StubRoutines::_sha512_implCompress = sha2.generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = sha2.generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress"); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB"); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); + StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); } if (UseChaCha20Intrinsics) { @@ -6549,8 +6614,8 @@ static const int64_t right_3_bits = right_n_bits(3); } if (UseSHA1Intrinsics) { - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, 
"sha1_implCompress"); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); } if (UseBASE64Intrinsics) { @@ -6572,27 +6637,27 @@ static const int64_t right_3_bits = right_n_bits(3); } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp index ae2e81f509e10..2a1150276c1be 100644 --- a/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -33,14 +33,19 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. -address StubRoutines::riscv::_zero_blocks = nullptr; -address StubRoutines::riscv::_compare_long_string_LL = nullptr; -address StubRoutines::riscv::_compare_long_string_UU = nullptr; -address StubRoutines::riscv::_compare_long_string_LU = nullptr; -address StubRoutines::riscv::_compare_long_string_UL = nullptr; -address StubRoutines::riscv::_string_indexof_linear_ll = nullptr; -address StubRoutines::riscv::_string_indexof_linear_uu = nullptr; -address StubRoutines::riscv::_string_indexof_linear_ul = nullptr; + +// define fields for arch-specific entries + +#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr; + +#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); + +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) + +#undef DEFINE_ARCH_ENTRY_INIT +#undef DEFINE_ARCH_ENTRY bool StubRoutines::riscv::_completed = false; diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp index 3bc5aeaa26826..1cd10b996dbcf 100644 --- a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp +++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp @@ -35,63 +35,53 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + enum platform_dependent_constants { - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 10000, - _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 45000, - 
_final_stubs_code_size = 20000 ZGC_ONLY(+10000) + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + class riscv { friend class StubGenerator; +#if INCLUDE_JVMCI + friend class JVMCIVMStructs; +#endif - private: - static address _zero_blocks; + // declare fields for arch-specific entries - static address _compare_long_string_LL; - static address _compare_long_string_LU; - static address _compare_long_string_UL; - static address _compare_long_string_UU; - static address _string_indexof_linear_ll; - static address _string_indexof_linear_uu; - static address _string_indexof_linear_ul; +#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name) ; - static bool _completed; +#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) - public: +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) - static address zero_blocks() { - return _zero_blocks; - } +#undef DECLARE_ARCH_ENTRY_INIT +#undef DECLARE_ARCH_ENTRY - static address compare_long_string_LL() { - return _compare_long_string_LL; - } + static bool _completed; - static address compare_long_string_LU() { - return _compare_long_string_LU; - } + public: - static address compare_long_string_UL() { - return _compare_long_string_UL; - } + // declare getters for arch-specific entries - static address compare_long_string_UU() { - return _compare_long_string_UU; - } +#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name) ; } - static address string_indexof_linear_ul() { - return _string_indexof_linear_ul; - } +#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - 
static address string_indexof_linear_ll() { - return _string_indexof_linear_ll; - } + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) - static address string_indexof_linear_uu() { - return _string_indexof_linear_uu; - } +#undef DEFINE_ARCH_ENTRY_GETTER_INIT +#undef DEFINE_ARCH_ENTRY_GETTER static bool complete() { return _completed; diff --git a/src/hotspot/cpu/s390/stubDeclarations_s390.hpp b/src/hotspot/cpu/s390/stubDeclarations_s390.hpp new file mode 100644 index 0000000000000..f382a319c489e --- /dev/null +++ b/src/hotspot/cpu/s390/stubDeclarations_s390.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_S390_STUBDECLARATIONS_HPP +#define CPU_S390_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 20000) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 2000) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 20000 ) \ + do_stub(compiler, partial_subtype_check) \ + do_arch_entry(zarch, compiler, partial_subtype_check, \ + partial_subtype_check, partial_subtype_check) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 20000) \ + + +#endif // CPU_S390_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp index 9e82d42e077b4..f542c125b3639 100644 --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp @@ -118,7 +118,8 @@ class StubGenerator: public StubCodeGenerator { // Set up a new C frame, copy Java arguments, call frame manager // or native_entry, and process result. - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Register r_arg_call_wrapper_addr = Z_ARG1; @@ -458,7 +459,8 @@ class StubGenerator: public StubCodeGenerator { // pending exception stored in JavaThread that can be tested from // within the VM. address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -509,7 +511,8 @@ class StubGenerator: public StubCodeGenerator { // (Z_R14 is unchanged and is live out). 
// address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward_exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); #define pending_exception_offset in_bytes(Thread::pending_exception_offset()) @@ -589,7 +592,8 @@ class StubGenerator: public StubCodeGenerator { // raddr: Z_R14, blown by call // address generate_partial_subtype_check() { - StubCodeMark mark(this, "StubRoutines", "partial_subtype_check"); + StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id; + StubCodeMark mark(this, stub_id); Label miss; address start = __ pc(); @@ -621,8 +625,9 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); + void generate_lookup_secondary_supers_table_stub() { + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubCodeMark mark(this, stub_id); const Register r_super_klass = Z_ARG1, @@ -632,20 +637,20 @@ class StubGenerator: public StubCodeGenerator { r_array_base = Z_ARG5, r_bitmap = Z_R10, r_result = Z_R11; - address start = __ pc(); - - __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, - r_array_base, r_array_length, r_array_index, - r_bitmap, r_result, super_klass_index); - - __ z_br(Z_R14); + for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, + r_array_base, r_array_length, r_array_index, + r_bitmap, r_result, slot); - return start; + __ z_br(Z_R14); + } } // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub() { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path"); + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1260,51 +1265,75 @@ class StubGenerator: public StubCodeGenerator { } } - // Generate stub for disjoint byte copy. If "aligned" is true, the - // "from" and "to" addresses are assumed to be heapword aligned. - address generate_disjoint_byte_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - - // This is the zarch specific stub generator for byte array copy. - // Refer to generate_disjoint_copy for a list of prereqs and features: - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - generate_disjoint_copy(aligned, 1, false, false); - return __ addr_at(start_off); - } - - - address generate_disjoint_short_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for short array copy. - // Refer to generate_disjoint_copy for a list of prereqs and features: - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - generate_disjoint_copy(aligned, 2, false, false); - return __ addr_at(start_off); - } - - - address generate_disjoint_int_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for int array copy. - // Refer to generate_disjoint_copy for a list of prereqs and features: - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 
- generate_disjoint_copy(aligned, 4, false, false); - return __ addr_at(start_off); - } - - - address generate_disjoint_long_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for long array copy. - // Refer to generate_disjoint_copy for a list of prereqs and features: + address generate_disjoint_nonoop_copy(StubGenStubId stub_id) { + bool aligned; + int element_size; + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + aligned = false; + element_size = 1; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + aligned = true; + element_size = 1; + break; + case jshort_disjoint_arraycopy_id: + aligned = false; + element_size = 2; + break; + case arrayof_jshort_disjoint_arraycopy_id: + aligned = true; + element_size = 2; + break; + case jint_disjoint_arraycopy_id: + aligned = false; + element_size = 4; + break; + case arrayof_jint_disjoint_arraycopy_id: + aligned = true; + element_size = 4; + break; + case jlong_disjoint_arraycopy_id: + aligned = false; + element_size = 8; + break; + case arrayof_jlong_disjoint_arraycopy_id: + aligned = true; + element_size = 8; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 
- generate_disjoint_copy(aligned, 8, false, false); + generate_disjoint_copy(aligned, element_size, false, false); return __ addr_at(start_off); } - - address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_disjoint_oop_copy(StubGenStubId stub_id) { + bool aligned; + bool dest_uninitialized; + switch (stub_id) { + case oop_disjoint_arraycopy_id: + aligned = false; + dest_uninitialized = false; + break; + case arrayof_oop_disjoint_arraycopy_id: + aligned = true; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_uninit_id: + aligned = false; + dest_uninitialized = true; + break; + case arrayof_oop_disjoint_arraycopy_uninit_id: + aligned = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); // This is the zarch specific stub generator for oop array copy. // Refer to generate_disjoint_copy for a list of prereqs and features. unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). @@ -1328,77 +1357,96 @@ class StubGenerator: public StubCodeGenerator { return __ addr_at(start_off); } - - address generate_conjoint_byte_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for overlapping byte array copy. - // Refer to generate_conjoint_copy for a list of prereqs and features: - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy() - : StubRoutines::jbyte_disjoint_arraycopy(); - - array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint. 
- generate_conjoint_copy(aligned, 1, false); - - return __ addr_at(start_off); - } - - - address generate_conjoint_short_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for overlapping short array copy. - // Refer to generate_conjoint_copy for a list of prereqs and features: - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy() - : StubRoutines::jshort_disjoint_arraycopy(); - - array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint. - generate_conjoint_copy(aligned, 2, false); - - return __ addr_at(start_off); - } - - address generate_conjoint_int_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for overlapping int array copy. - // Refer to generate_conjoint_copy for a list of prereqs and features: - - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy() - : StubRoutines::jint_disjoint_arraycopy(); - - array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint. - generate_conjoint_copy(aligned, 4, false); - - return __ addr_at(start_off); - } - - address generate_conjoint_long_copy(bool aligned, const char * name) { - StubCodeMark mark(this, "StubRoutines", name); - // This is the zarch specific stub generator for overlapping long array copy. - // Refer to generate_conjoint_copy for a list of prereqs and features: - - unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - address nooverlap_target = aligned ? 
StubRoutines::arrayof_jlong_disjoint_arraycopy() - : StubRoutines::jlong_disjoint_arraycopy(); - - array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint. - generate_conjoint_copy(aligned, 8, false); - + address generate_conjoint_nonoop_copy(StubGenStubId stub_id) { + bool aligned; + int shift; // i.e. log2(element size) + address nooverlap_target; + switch (stub_id) { + case jbyte_arraycopy_id: + aligned = false; + shift = 0; + nooverlap_target = StubRoutines::jbyte_disjoint_arraycopy(); + break; + case arrayof_jbyte_arraycopy_id: + aligned = true; + shift = 0; + nooverlap_target = StubRoutines::arrayof_jbyte_disjoint_arraycopy(); + break; + case jshort_arraycopy_id: + aligned = false; + shift = 1; + nooverlap_target = StubRoutines::jshort_disjoint_arraycopy(); + break; + case arrayof_jshort_arraycopy_id: + aligned = true; + shift = 1; + nooverlap_target = StubRoutines::arrayof_jshort_disjoint_arraycopy(); + break; + case jint_arraycopy_id: + aligned = false; + shift = 2; + nooverlap_target = StubRoutines::jint_disjoint_arraycopy(); + break; + case arrayof_jint_arraycopy_id: + aligned = true; + shift = 2; + nooverlap_target = StubRoutines::arrayof_jint_disjoint_arraycopy(); + break; + case jlong_arraycopy_id: + aligned = false; + shift = 3; + nooverlap_target = StubRoutines::jlong_disjoint_arraycopy(); + break; + case arrayof_jlong_arraycopy_id: + aligned = true; + shift = 3; + nooverlap_target = StubRoutines::arrayof_jlong_disjoint_arraycopy(); + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); + unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). + array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint. 
+ generate_conjoint_copy(aligned, 1 << shift, false); return __ addr_at(start_off); } - address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) { - StubCodeMark mark(this, "StubRoutines", name); + address generate_conjoint_oop_copy(StubGenStubId stub_id) { + bool aligned; + bool dest_uninitialized; + address nooverlap_target; + switch (stub_id) { + case oop_arraycopy_id: + aligned = false; + dest_uninitialized = false; + nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized); + break; + case arrayof_oop_arraycopy_id: + aligned = true; + dest_uninitialized = false; + nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized); + break; + case oop_arraycopy_uninit_id: + aligned = false; + dest_uninitialized = true; + nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized); + break; + case arrayof_oop_arraycopy_uninit_id: + aligned = true; + dest_uninitialized = true; + nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized); + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); // This is the zarch specific stub generator for overlapping oop array copy. // Refer to generate_conjoint_copy for a list of prereqs and features. unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). unsigned int size = UseCompressedOops ? 4 : 8; unsigned int shift = UseCompressedOops ? 2 : 3; - address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized) - : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized); - // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier. array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint. @@ -1425,33 +1473,33 @@ class StubGenerator: public StubCodeGenerator { // Note: the disjoint stubs must be generated first, some of // the conjoint stubs use them. 
- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy"); - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false); - StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true); - - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy"); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy"); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy", false); - StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy_uninit", true); - - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy (false, "jbyte_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy (false, "jint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy (false, "jlong_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy (false, "oop_arraycopy", false); - StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy (false, "oop_arraycopy_uninit", true); - - 
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy"); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy"); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy"); - StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false); - StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jbyte_disjoint_arraycopy_id); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::jshort_disjoint_arraycopy_id); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jint_disjoint_arraycopy_id); + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jlong_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_uninit_id); + + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jint_disjoint_arraycopy_id); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jlong_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy 
(StubGenStubId::arrayof_oop_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id); + + StubRoutines::_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jbyte_arraycopy_id); + StubRoutines::_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jshort_arraycopy_id); + StubRoutines::_jint_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jint_arraycopy_id); + StubRoutines::_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jlong_arraycopy_id); + StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id); + + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jbyte_arraycopy_id); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jshort_arraycopy_id); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_nonoop_copy (StubGenStubId::arrayof_jint_arraycopy_id); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jlong_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id); } // Call interface for AES_encryptBlock, AES_decryptBlock stubs. @@ -1733,9 +1781,10 @@ class StubGenerator: public StubCodeGenerator { } // Compute AES encrypt function. 
- address generate_AES_encryptBlock(const char* name) { + address generate_AES_encryptBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). generate_AES_cipherBlock(false); @@ -1744,9 +1793,10 @@ class StubGenerator: public StubCodeGenerator { } // Compute AES decrypt function. - address generate_AES_decryptBlock(const char* name) { + address generate_AES_decryptBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). generate_AES_cipherBlock(true); @@ -1804,9 +1854,10 @@ class StubGenerator: public StubCodeGenerator { } // Compute chained AES encrypt function. - address generate_cipherBlockChaining_AES_encrypt(const char* name) { + address generate_cipherBlockChaining_AES_encrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). generate_AES_cipherBlockChaining(false); @@ -1815,9 +1866,10 @@ class StubGenerator: public StubCodeGenerator { } // Compute chained AES decrypt function. - address generate_cipherBlockChaining_AES_decrypt(const char* name) { + address generate_cipherBlockChaining_AES_decrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). 
generate_AES_cipherBlockChaining(true); @@ -2521,9 +2573,10 @@ class StubGenerator: public StubCodeGenerator { // Compute AES-CTR crypto function. // Encrypt or decrypt is selected via parameters. Only one stub is necessary. - address generate_counterMode_AESCrypt(const char* name) { + address generate_counterMode_AESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). generate_counterMode_AES(false); @@ -2536,7 +2589,8 @@ class StubGenerator: public StubCodeGenerator { // Compute GHASH function. address generate_ghash_processBlocks() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). const Register state = Z_ARG1; @@ -2613,9 +2667,20 @@ class StubGenerator: public StubCodeGenerator { // provides for a large enough source data buffer. // // Compute SHA-1 function. - address generate_SHA1_stub(bool multiBlock, const char* name) { + address generate_SHA1_stub(StubGenStubId stub_id) { + bool multiBlock; + switch (stub_id) { + case sha1_implCompress_id: + multiBlock = false; + break; + case sha1_implCompressMB_id: + multiBlock = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added). @@ -2695,9 +2760,20 @@ class StubGenerator: public StubCodeGenerator { } // Compute SHA-256 function. 
- address generate_SHA256_stub(bool multiBlock, const char* name) { + address generate_SHA256_stub(StubGenStubId stub_id) { + bool multiBlock; + switch (stub_id) { + case sha256_implCompress_id: + multiBlock = false; + break; + case sha256_implCompressMB_id: + multiBlock = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). const Register srcBuff = Z_ARG1; @@ -2775,9 +2851,20 @@ class StubGenerator: public StubCodeGenerator { } // Compute SHA-512 function. - address generate_SHA512_stub(bool multiBlock, const char* name) { + address generate_SHA512_stub(StubGenStubId stub_id) { + bool multiBlock; + switch (stub_id) { + case sha512_implCompress_id: + multiBlock = false; + break; + case sha512_implCompressMB_id: + multiBlock = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). const Register srcBuff = Z_ARG1; @@ -2867,7 +2954,7 @@ class StubGenerator: public StubCodeGenerator { * Z_RET - int crc result **/ // Compute CRC function (generic, for all polynomials). - void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) { + void generate_CRC_updateBytes(Register table, bool invertCRC) { // arguments to kernel_crc32: Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int. @@ -2898,18 +2985,19 @@ class StubGenerator: public StubCodeGenerator { // Compute CRC32 function. 
- address generate_CRC32_updateBytes(const char* name) { + address generate_CRC32_updateBytes() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name); + assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", StubRoutines::get_stub_name(stub_id)); BLOCK_COMMENT("CRC32_updateBytes {"); Register table = Z_ARG4; // crc32 table address. StubRoutines::zarch::generate_load_crc_table_addr(_masm, table); - generate_CRC_updateBytes(name, table, true); + generate_CRC_updateBytes(table, true); BLOCK_COMMENT("} CRC32_updateBytes"); return __ addr_at(start_off); @@ -2917,18 +3005,19 @@ class StubGenerator: public StubCodeGenerator { // Compute CRC32C function. - address generate_CRC32C_updateBytes(const char* name) { + address generate_CRC32C_updateBytes() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubCodeMark mark(this, stub_id); unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). - assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name); + assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", StubRoutines::get_stub_name(stub_id)); BLOCK_COMMENT("CRC32C_updateBytes {"); Register table = Z_ARG4; // crc32c table address. 
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table); - generate_CRC_updateBytes(name, table, false); + generate_CRC_updateBytes(table, false); BLOCK_COMMENT("} CRC32C_updateBytes"); return __ addr_at(start_off); @@ -2943,7 +3032,8 @@ class StubGenerator: public StubCodeGenerator { // Z_ARG5 - z address address generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2974,7 +3064,8 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3039,7 +3130,8 @@ class StubGenerator: public StubCodeGenerator { // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Native caller has no idea how to handle exceptions, @@ -3056,7 +3148,8 @@ class StubGenerator: public StubCodeGenerator { // Z_ARG1 = jobject receiver // Z_method = Method* result address generate_upcall_stub_load_target() { - StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ resolve_global_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2); @@ -3093,12 +3186,12 @@ class StubGenerator: public StubCodeGenerator { if (UseCRC32Intrinsics) { StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table; - StubRoutines::_updateBytesCRC32 = 
generate_CRC32_updateBytes("CRC32_updateBytes"); + StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(); } if (UseCRC32CIntrinsics) { StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table; - StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes"); + StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes(); } // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. @@ -3117,8 +3210,6 @@ class StubGenerator: public StubCodeGenerator { void generate_final_stubs() { // Generates all stubs and initializes the entry points. - StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check(); - // Support for verify_oop (must happen after universe_init). StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine(); @@ -3131,19 +3222,31 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_method_entry_barrier = generate_method_entry_barrier(); } +#ifdef COMPILER2 + if (UseSecondarySupersTable) { + StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); + if (!InlineSecondarySupersTest) { + generate_lookup_secondary_supers_table_stub(); + } + } +#endif // COMPILER2 + StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler(); StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target(); } void generate_compiler_stubs() { + + StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check(); + #if COMPILER2_OR_JVMCI // Generate AES intrinsics code. 
if (UseAESIntrinsics) { if (VM_Version::has_Crypto_AES()) { - StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock"); - StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock"); - StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining"); - StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining"); + StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock(); + StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock(); + StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt(); + StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt(); } else { // In PRODUCT builds, the function pointers will keep their initial (null) value. // LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called. @@ -3153,7 +3256,7 @@ class StubGenerator: public StubCodeGenerator { if (UseAESCTRIntrinsics) { if (VM_Version::has_Crypto_AES_CTR()) { - StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt"); + StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt(); } else { // In PRODUCT builds, the function pointers will keep their initial (null) value. // LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called. @@ -3168,16 +3271,16 @@ class StubGenerator: public StubCodeGenerator { // Generate SHA1/SHA256/SHA512 intrinsics code. 
if (UseSHA1Intrinsics) { - StubRoutines::_sha1_implCompress = generate_SHA1_stub(false, "SHA1_singleBlock"); - StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true, "SHA1_multiBlock"); + StubRoutines::_sha1_implCompress = generate_SHA1_stub(StubGenStubId::sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(StubGenStubId::sha1_implCompressMB_id); } if (UseSHA256Intrinsics) { - StubRoutines::_sha256_implCompress = generate_SHA256_stub(false, "SHA256_singleBlock"); - StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true, "SHA256_multiBlock"); + StubRoutines::_sha256_implCompress = generate_SHA256_stub(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(StubGenStubId::sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { - StubRoutines::_sha512_implCompress = generate_SHA512_stub(false, "SHA512_singleBlock"); - StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true, "SHA512_multiBlock"); + StubRoutines::_sha512_implCompress = generate_SHA512_stub(StubGenStubId::sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(StubGenStubId::sha512_implCompressMB_id); } #ifdef COMPILER2 @@ -3192,35 +3295,27 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_montgomerySquare = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); } - if (UseSecondarySupersTable) { - StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); - if (!InlineSecondarySupersTest) { - for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] = generate_lookup_secondary_supers_table_stub(slot); - } - } - } #endif #endif // COMPILER2_OR_JVMCI } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId 
blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } @@ -3259,6 +3354,6 @@ class StubGenerator: public StubCodeGenerator { }; -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/s390/stubRoutines_s390.cpp b/src/hotspot/cpu/s390/stubRoutines_s390.cpp index 815cffd3f72a6..e75928ad00e61 100644 --- a/src/hotspot/cpu/s390/stubRoutines_s390.cpp +++ b/src/hotspot/cpu/s390/stubRoutines_s390.cpp @@ -32,7 +32,18 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. -address StubRoutines::zarch::_partial_subtype_check = nullptr; +// define fields for arch-specific entries + +#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr; + +#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); + +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) + +#undef DEFINE_ARCH_ENTRY_INIT +#undef DEFINE_ARCH_ENTRY // Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction. 
address StubRoutines::zarch::_trot_table_addr = nullptr; diff --git a/src/hotspot/cpu/s390/stubRoutines_s390.hpp b/src/hotspot/cpu/s390/stubRoutines_s390.hpp index 7116d441715ad..7a4bc18eb7d4c 100644 --- a/src/hotspot/cpu/s390/stubRoutines_s390.hpp +++ b/src/hotspot/cpu/s390/stubRoutines_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,14 +31,17 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } -enum { // Platform dependent constants. - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 20000, - _continuation_stubs_code_size = 2000, - _compiler_stubs_code_size = 20000, - _final_stubs_code_size = 20000 +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + +enum platform_dependent_constants { + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + // MethodHandles adapters enum method_handles_platform_dependent_constants { method_handles_adapters_code_size = 5000 @@ -69,10 +72,24 @@ class zarch { locked = 1 }; + // declare fields for arch-specific entries + +#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name) ; + +#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) + +private: + STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) + +#undef DECLARE_ARCH_ENTRY_INIT +#undef DECLARE_ARCH_ENTRY + private: + static int _atomic_memory_operation_lock; - static address _partial_subtype_check; 
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE]; static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE]; @@ -81,6 +98,20 @@ class zarch { static jlong _trot_table[TROT_COLUMN_SIZE]; public: + + // declare getters for arch-specific entries + +#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name) ; } + +#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) + + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) + +#undef DEFINE_ARCH_ENTRY_GETTER_INIT +#undef DEFINE_ARCH_ENTRY_GETTER + // Global lock for everyone who needs to use atomic_compare_and_exchange // or atomic_increment -- should probably use more locks for more // scalability -- for instance one for each eden space or group of. @@ -92,8 +123,6 @@ class zarch { static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; } static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; } - static address partial_subtype_check() { return _partial_subtype_check; } - static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents); static void generate_load_crc_table_addr(MacroAssembler* masm, Register table); static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table); diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp index 7dadb15ef91a8..ee2d6d8a0bedc 100644 --- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp +++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp @@ -199,13 +199,14 @@ void StubGenerator::generate_string_indexof(address *fnptrs) { static void generate_string_indexof_stubs(StubGenerator *stubgen, 
address *fnptrs, StrIntrinsicNode::ArgEncoding ae, MacroAssembler *_masm) { - StubCodeMark mark(stubgen, "StubRoutines", "stringIndexOf"); bool isLL = (ae == StrIntrinsicNode::LL); bool isUL = (ae == StrIntrinsicNode::UL); bool isUU = (ae == StrIntrinsicNode::UU); bool isU = isUL || isUU; // At least one is UTF-16 assert(isLL || isUL || isUU, "Encoding not recognized"); + StubGenStubId stub_id = (isLL ? StubGenStubId::string_indexof_linear_ll_id : (isUL ? StubGenStubId::string_indexof_linear_ul_id : StubGenStubId::string_indexof_linear_uu_id)); + StubCodeMark mark(stubgen, stub_id); // Keep track of isUL since we need to generate UU code in the main body // for the case where we expand the needle from bytes to words on the stack. // This is done at L_wcharBegin. The algorithm used is: diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index 0830b6b098387..1c7f851347e0d 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -9136,14 +9136,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe Label L_exit; if (is_pclmulqdq_supported ) { - const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; - const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); + const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr(); + const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1); - const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); - const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); + const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2); + const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3); - const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); - 
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); + const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4); + const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5); assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); } else { const_or_pre_comp_const_index[0] = 1; @@ -9216,14 +9216,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe Label L_exit; if (is_pclmulqdq_supported) { - const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; - const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); + const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr(); + const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1); - const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); - const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); + const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2); + const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3); - const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); - const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); + const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4); + const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5); } else { const_or_pre_comp_const_index[0] = 1; const_or_pre_comp_const_index[1] = 0; diff --git a/src/hotspot/cpu/x86/stubDeclarations_x86.hpp b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp new file mode 100644 index 0000000000000..ffa8bc80fb709 --- /dev/null +++ b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp @@ 
-0,0 +1,262 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef CPU_X86_STUBDECLARATIONS_HPP +#define CPU_X86_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \ + do_stub(initial, verify_mxcsr) \ + do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \ + verify_mxcsr_entry) \ + LP64_ONLY( \ + do_stub(initial, get_previous_sp) \ + do_arch_entry(x86, initial, get_previous_sp, \ + get_previous_sp_entry, \ + get_previous_sp_entry) \ + do_stub(initial, f2i_fixup) \ + do_arch_entry(x86, initial, f2i_fixup, f2i_fixup, f2i_fixup) \ + do_stub(initial, f2l_fixup) \ + do_arch_entry(x86, initial, f2l_fixup, f2l_fixup, f2l_fixup) \ + do_stub(initial, d2i_fixup) \ + do_arch_entry(x86, initial, d2i_fixup, d2i_fixup, d2i_fixup) \ + do_stub(initial, d2l_fixup) \ + do_arch_entry(x86, initial, d2l_fixup, d2l_fixup, d2l_fixup) \ + do_stub(initial, float_sign_mask) \ + do_arch_entry(x86, initial, float_sign_mask, float_sign_mask, \ + float_sign_mask) \ + do_stub(initial, float_sign_flip) \ + do_arch_entry(x86, initial, float_sign_flip, float_sign_flip, \ + float_sign_flip) \ + do_stub(initial, double_sign_mask) \ + do_arch_entry(x86, initial, double_sign_mask, double_sign_mask, \ + double_sign_mask) \ + do_stub(initial, double_sign_flip) \ + do_arch_entry(x86, initial, double_sign_flip, double_sign_flip, \ + double_sign_flip) \ + ) \ + NOT_LP64( \ + do_stub(initial, verify_fpu_cntrl_word) \ + do_arch_entry(x86, initial, verify_fpu_cntrl_word, \ + verify_fpu_cntrl_wrd_entry, \ + verify_fpu_cntrl_wrd_entry) \ + do_stub(initial, d2i_wrapper) \ + do_arch_entry(x86, initial, d2i_wrapper, d2i_wrapper, \ + d2i_wrapper) \ + do_stub(initial, d2l_wrapper) \ + do_arch_entry(x86, initial, d2l_wrapper, d2l_wrapper, \ + d2l_wrapper) \ + ) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 1000 
LP64_ONLY(+2000)) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 20000 LP64_ONLY(+60000) WINDOWS_ONLY(+2000)) \ + do_stub(compiler, vector_float_sign_mask) \ + do_arch_entry(x86, compiler, vector_float_sign_mask, \ + vector_float_sign_mask, vector_float_sign_mask) \ + do_stub(compiler, vector_float_sign_flip) \ + do_arch_entry(x86, compiler, vector_float_sign_flip, \ + vector_float_sign_flip, vector_float_sign_flip) \ + do_stub(compiler, vector_double_sign_mask) \ + do_arch_entry(x86, compiler, vector_double_sign_mask, \ + vector_double_sign_mask, vector_double_sign_mask) \ + do_stub(compiler, vector_double_sign_flip) \ + do_arch_entry(x86, compiler, vector_double_sign_flip, \ + vector_double_sign_flip, vector_double_sign_flip) \ + do_stub(compiler, vector_all_bits_set) \ + do_arch_entry(x86, compiler, vector_all_bits_set, \ + vector_all_bits_set, vector_all_bits_set) \ + do_stub(compiler, vector_int_mask_cmp_bits) \ + do_arch_entry(x86, compiler, vector_int_mask_cmp_bits, \ + vector_int_mask_cmp_bits, vector_int_mask_cmp_bits) \ + do_stub(compiler, vector_short_to_byte_mask) \ + do_arch_entry(x86, compiler, vector_short_to_byte_mask, \ + vector_short_to_byte_mask, vector_short_to_byte_mask) \ + do_stub(compiler, vector_byte_perm_mask) \ + do_arch_entry(x86, compiler,vector_byte_perm_mask, \ + vector_byte_perm_mask, vector_byte_perm_mask) \ + do_stub(compiler, vector_int_to_byte_mask) \ + do_arch_entry(x86, compiler, vector_int_to_byte_mask, \ + vector_int_to_byte_mask, vector_int_to_byte_mask) \ + do_stub(compiler, vector_int_to_short_mask) \ + do_arch_entry(x86, compiler, vector_int_to_short_mask, \ + vector_int_to_short_mask, vector_int_to_short_mask) \ + do_stub(compiler, vector_32_bit_mask) \ + do_arch_entry(x86, compiler, vector_32_bit_mask, \ + vector_32_bit_mask, vector_32_bit_mask) \ + do_stub(compiler, vector_64_bit_mask) \ + do_arch_entry(x86, compiler, 
vector_64_bit_mask, \ + vector_64_bit_mask, vector_64_bit_mask) \ + do_stub(compiler, vector_byte_shuffle_mask) \ + do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ + vector_byte_shuffle_mask, vector_byte_shuffle_mask) \ + do_stub(compiler, vector_short_shuffle_mask) \ + do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ + vector_short_shuffle_mask, vector_short_shuffle_mask) \ + do_stub(compiler, vector_int_shuffle_mask) \ + do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ + vector_int_shuffle_mask, vector_int_shuffle_mask) \ + do_stub(compiler, vector_long_shuffle_mask) \ + do_arch_entry(x86, compiler, vector_long_shuffle_mask, \ + vector_long_shuffle_mask, vector_long_shuffle_mask) \ + do_stub(compiler, vector_long_sign_mask) \ + do_arch_entry(x86, compiler, vector_long_sign_mask, \ + vector_long_sign_mask, vector_long_sign_mask) \ + do_stub(compiler, vector_iota_indices) \ + do_arch_entry(x86, compiler, vector_iota_indices, \ + vector_iota_indices, vector_iota_indices) \ + do_stub(compiler, vector_count_leading_zeros_lut) \ + do_arch_entry(x86, compiler, vector_count_leading_zeros_lut, \ + vector_count_leading_zeros_lut, \ + vector_count_leading_zeros_lut) \ + do_stub(compiler, vector_reverse_bit_lut) \ + do_arch_entry(x86, compiler, vector_reverse_bit_lut, \ + vector_reverse_bit_lut, vector_reverse_bit_lut) \ + do_stub(compiler, vector_reverse_byte_perm_mask_short) \ + do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_short, \ + vector_reverse_byte_perm_mask_short, \ + vector_reverse_byte_perm_mask_short) \ + do_stub(compiler, vector_reverse_byte_perm_mask_int) \ + do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_int, \ + vector_reverse_byte_perm_mask_int, \ + vector_reverse_byte_perm_mask_int) \ + do_stub(compiler, vector_reverse_byte_perm_mask_long) \ + do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_long, \ + vector_reverse_byte_perm_mask_long, \ + vector_reverse_byte_perm_mask_long) \ + do_stub(compiler, 
vector_popcount_lut) \ + do_arch_entry(x86, compiler, vector_popcount_lut, \ + vector_popcount_lut, vector_popcount_lut) \ + do_stub(compiler, upper_word_mask) \ + do_arch_entry(x86, compiler, upper_word_mask, upper_word_mask_addr, \ + upper_word_mask_addr) \ + do_stub(compiler, shuffle_byte_flip_mask) \ + do_arch_entry(x86, compiler, shuffle_byte_flip_mask, \ + shuffle_byte_flip_mask_addr, \ + shuffle_byte_flip_mask_addr) \ + do_stub(compiler, pshuffle_byte_flip_mask) \ + do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \ + pshuffle_byte_flip_mask_addr, \ + pshuffle_byte_flip_mask_addr) \ + LP64_ONLY( \ + /* x86_64 exposes these 3 stubs via a generic entry array */ \ + /* other arches use arch-specific entries */ \ + /* this really needs rationalising */ \ + do_stub(compiler, string_indexof_linear_ll) \ + do_stub(compiler, string_indexof_linear_uu) \ + do_stub(compiler, string_indexof_linear_ul) \ + do_stub(compiler, pshuffle_byte_flip_mask_sha512) \ + do_arch_entry(x86, compiler, pshuffle_byte_flip_mask_sha512, \ + pshuffle_byte_flip_mask_addr_sha512, \ + pshuffle_byte_flip_mask_addr_sha512) \ + do_stub(compiler, compress_perm_table32) \ + do_arch_entry(x86, compiler, compress_perm_table32, \ + compress_perm_table32, compress_perm_table32) \ + do_stub(compiler, compress_perm_table64) \ + do_arch_entry(x86, compiler, compress_perm_table64, \ + compress_perm_table64, compress_perm_table64) \ + do_stub(compiler, expand_perm_table32) \ + do_arch_entry(x86, compiler, expand_perm_table32, \ + expand_perm_table32, expand_perm_table32) \ + do_stub(compiler, expand_perm_table64) \ + do_arch_entry(x86, compiler, expand_perm_table64, \ + expand_perm_table64, expand_perm_table64) \ + do_stub(compiler, avx2_shuffle_base64) \ + do_arch_entry(x86, compiler, avx2_shuffle_base64, \ + avx2_shuffle_base64, base64_avx2_shuffle_addr) \ + do_stub(compiler, avx2_input_mask_base64) \ + do_arch_entry(x86, compiler, avx2_input_mask_base64, \ + avx2_input_mask_base64, \ +
base64_avx2_input_mask_addr) \ + do_stub(compiler, avx2_lut_base64) \ + do_arch_entry(x86, compiler, avx2_lut_base64, \ + avx2_lut_base64, base64_avx2_lut_addr) \ + do_stub(compiler, avx2_decode_tables_base64) \ + do_arch_entry(x86, compiler, avx2_decode_tables_base64, \ + avx2_decode_tables_base64, \ + base64_AVX2_decode_tables_addr) \ + do_stub(compiler, avx2_decode_lut_tables_base64) \ + do_arch_entry(x86, compiler, avx2_decode_lut_tables_base64, \ + avx2_decode_lut_tables_base64, \ + base64_AVX2_decode_LUT_tables_addr) \ + do_stub(compiler, shuffle_base64) \ + do_arch_entry(x86, compiler, shuffle_base64, shuffle_base64, \ + base64_shuffle_addr) \ + do_stub(compiler, lookup_lo_base64) \ + do_arch_entry(x86, compiler, lookup_lo_base64, lookup_lo_base64, \ + base64_vbmi_lookup_lo_addr) \ + do_stub(compiler, lookup_hi_base64) \ + do_arch_entry(x86, compiler, lookup_hi_base64, lookup_hi_base64, \ + base64_vbmi_lookup_hi_addr) \ + do_stub(compiler, lookup_lo_base64url) \ + do_arch_entry(x86, compiler, lookup_lo_base64url, \ + lookup_lo_base64url, \ + base64_vbmi_lookup_lo_url_addr) \ + do_stub(compiler, lookup_hi_base64url) \ + do_arch_entry(x86, compiler, lookup_hi_base64url, \ + lookup_hi_base64url, \ + base64_vbmi_lookup_hi_url_addr) \ + do_stub(compiler, pack_vec_base64) \ + do_arch_entry(x86, compiler, pack_vec_base64, pack_vec_base64, \ + base64_vbmi_pack_vec_addr) \ + do_stub(compiler, join_0_1_base64) \ + do_arch_entry(x86, compiler, join_0_1_base64, join_0_1_base64, \ + base64_vbmi_join_0_1_addr) \ + do_stub(compiler, join_1_2_base64) \ + do_arch_entry(x86, compiler, join_1_2_base64, join_1_2_base64, \ + base64_vbmi_join_1_2_addr) \ + do_stub(compiler, join_2_3_base64) \ + do_arch_entry(x86, compiler, join_2_3_base64, join_2_3_base64, \ + base64_vbmi_join_2_3_addr) \ + do_stub(compiler, encoding_table_base64) \ + do_arch_entry(x86, compiler, encoding_table_base64, \ + encoding_table_base64, base64_encoding_table_addr) \ + do_stub(compiler, 
decoding_table_base64) \ + do_arch_entry(x86, compiler, decoding_table_base64, \ + decoding_table_base64, base64_decoding_table_addr) \ + ) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 11000 LP64_ONLY(+20000) \ + WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \ + +#endif // CPU_X86_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp index d0e611e18d5ce..d3edd437a76cd 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -137,7 +137,8 @@ class StubGenerator: public StubCodeGenerator { address generate_call_stub(address& return_address) { - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // stub code parameters / addresses @@ -339,7 +340,8 @@ class StubGenerator: public StubCodeGenerator { // rax,: exception oop address generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); const Address rsp_after_call(rbp, -4 * wordSize); // same as in generate_call_stub()! const Address thread (rbp, 9 * wordSize); // same as in generate_call_stub()! address start = __ pc(); @@ -382,7 +384,8 @@ class StubGenerator: public StubCodeGenerator { // NOTE: At entry of this stub, exception-pc must be on stack !! 
address generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register thread = rcx; @@ -454,7 +457,8 @@ class StubGenerator: public StubCodeGenerator { address generate_verify_mxcsr() { - StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); + StubGenStubId stub_id = StubGenStubId::verify_mxcsr_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Address mxcsr_save(rsp, 0); @@ -493,7 +497,8 @@ class StubGenerator: public StubCodeGenerator { // FP control word to our expected state. address generate_verify_fpu_cntrl_wrd() { - StubCodeMark mark(this, "StubRoutines", "verify_spcw"); + StubGenStubId stub_id = StubGenStubId::verify_fpu_cntrl_word_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Address fpu_cntrl_wrd_save(rsp, 0); @@ -531,7 +536,8 @@ class StubGenerator: public StubCodeGenerator { // Output: rax, (rdx): integer (long) result address generate_d2i_wrapper(BasicType t, address fcn) { - StubCodeMark mark(this, "StubRoutines", "d2i_wrapper"); + StubGenStubId stub_id = StubGenStubId::d2i_wrapper_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Capture info about frame layout @@ -591,9 +597,9 @@ class StubGenerator: public StubCodeGenerator { } //--------------------------------------------------------------------------------------------------- - address generate_vector_mask(const char *stub_name, int32_t mask) { + address generate_vector_mask(StubGenStubId stub_id, int32_t mask) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); for (int i = 0; i < 16; i++) { @@ -603,9 +609,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_count_leading_zeros_lut(const char *stub_name) { + address 
generate_count_leading_zeros_lut() { __ align64(); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_count_leading_zeros_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x02020304, relocInfo::none, 0); __ emit_data(0x01010101, relocInfo::none, 0); @@ -627,9 +634,10 @@ class StubGenerator: public StubCodeGenerator { } - address generate_popcount_avx_lut(const char *stub_name) { + address generate_popcount_avx_lut() { __ align64(); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_popcount_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x02010100, relocInfo::none, 0); __ emit_data(0x03020201, relocInfo::none, 0); @@ -651,9 +659,10 @@ class StubGenerator: public StubCodeGenerator { } - address generate_iota_indices(const char *stub_name) { + address generate_iota_indices() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_iota_indices_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // B __ emit_data(0x03020100, relocInfo::none, 0); @@ -765,9 +774,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_reverse_bit_lut(const char *stub_name) { + address generate_vector_reverse_bit_lut() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_bit_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x0C040800, relocInfo::none, 0); __ emit_data(0x0E060A02, relocInfo::none, 0); @@ -788,9 +798,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_reverse_byte_perm_mask_long(const char *stub_name) { + address generate_vector_reverse_byte_perm_mask_long() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, 
"StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_long_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x04050607, relocInfo::none, 0); __ emit_data(0x00010203, relocInfo::none, 0); @@ -811,9 +822,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_reverse_byte_perm_mask_int(const char *stub_name) { + address generate_vector_reverse_byte_perm_mask_int() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_int_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x00010203, relocInfo::none, 0); __ emit_data(0x04050607, relocInfo::none, 0); @@ -834,9 +846,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_reverse_byte_perm_mask_short(const char *stub_name) { + address generate_vector_reverse_byte_perm_mask_short() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_short_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x02030001, relocInfo::none, 0); __ emit_data(0x06070405, relocInfo::none, 0); @@ -857,9 +870,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_byte_shuffle_mask(const char *stub_name) { + address generate_vector_byte_shuffle_mask() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_byte_shuffle_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x70707070, relocInfo::none, 0); __ emit_data(0x70707070, relocInfo::none, 0); @@ -872,9 +886,9 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_mask_long_double(const char *stub_name, 
int32_t maskhi, int32_t masklo) { + address generate_vector_mask_long_double(StubGenStubId stub_id, int32_t maskhi, int32_t masklo) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); for (int i = 0; i < 8; i++) { @@ -887,9 +901,10 @@ class StubGenerator: public StubCodeGenerator { //---------------------------------------------------------------------------------------------------- - address generate_vector_byte_perm_mask(const char *stub_name) { + address generate_vector_byte_perm_mask() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_byte_perm_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x00000001, relocInfo::none, 0); @@ -912,13 +927,13 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len, + address generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len, int32_t val0, int32_t val1, int32_t val2, int32_t val3, int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0, int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0, int32_t val12 = 0, int32_t val13 = 0, int32_t val14 = 0, int32_t val15 = 0) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(len != Assembler::AVX_NoVec, "vector len must be specified"); @@ -950,7 +965,8 @@ class StubGenerator: public StubCodeGenerator { // Non-destructive plausibility checks for oops address generate_verify_oop() { - StubCodeMark mark(this, "StubRoutines", "verify_oop"); + StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Incoming arguments on stack after saving rax,: @@ -1082,12 
+1098,82 @@ class StubGenerator: public StubCodeGenerator { __ BIND(L_exit); } - address generate_disjoint_copy(BasicType t, bool aligned, - Address::ScaleFactor sf, - address* entry, const char *name, - bool dest_uninitialized = false) { + address generate_disjoint_copy(StubGenStubId stub_id, address* entry) { + BasicType t; + bool aligned; + Address::ScaleFactor sf; + bool dest_uninitialized; + + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + t = T_BYTE; + aligned = false; + sf = Address::times_1; + dest_uninitialized = false; + break; + case arrayof_jbyte_disjoint_arraycopy_id: + t = T_BYTE; + aligned = true; + sf = Address::times_1; + dest_uninitialized = false; + break; + case jshort_disjoint_arraycopy_id: + t = T_SHORT; + aligned = false; + sf = Address::times_2; + dest_uninitialized = false; + break; + case arrayof_jshort_disjoint_arraycopy_id: + t = T_SHORT; + aligned = true; + sf = Address::times_2; + dest_uninitialized = false; + break; + case jint_disjoint_arraycopy_id: + t = T_INT; + aligned = true; + sf = Address::times_4; + dest_uninitialized = false; + break; + case arrayof_jint_disjoint_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case jint_disjoint_arraycopy + ShouldNotReachHere(); + break; + case jlong_disjoint_arraycopy_id: + case arrayof_jlong_disjoint_arraycopy_id: + // Handled by a special generator routine on 32 bit + ShouldNotReachHere(); + break; + case oop_disjoint_arraycopy_id: + t = T_OBJECT; + aligned = true; + sf = Address::times_ptr; + dest_uninitialized = false; + break; + case arrayof_oop_disjoint_arraycopy_id: + // since this is always aligned we can (should!) 
use the same + // stub as for case oop_disjoint_arraycopy + ShouldNotReachHere(); + break; + case oop_disjoint_arraycopy_uninit_id: + t = T_OBJECT; + aligned = true; + sf = Address::times_ptr; + dest_uninitialized = true; + break; + case arrayof_oop_disjoint_arraycopy_uninit_id: + // since this is always aligned we can (should!) use the same + // stub as for case oop_disjoint_arraycopy_uninit + ShouldNotReachHere(); + break; + default: + ShouldNotReachHere(); + break; + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; @@ -1230,9 +1316,41 @@ class StubGenerator: public StubCodeGenerator { } - address generate_fill(BasicType t, bool aligned, const char *name) { + address generate_fill(StubGenStubId stub_id) { + BasicType t; + bool aligned; + switch(stub_id) { + case jbyte_fill_id: + t = T_BYTE; + aligned = false; + break; + case jshort_fill_id: + t = T_SHORT; + aligned = false; + break; + case jint_fill_id: + t = T_INT; + aligned = false; + break; + case arrayof_jbyte_fill_id: + t = T_BYTE; + aligned = true; + break; + case arrayof_jshort_fill_id: + t = T_SHORT; + aligned = true; + break; + case arrayof_jint_fill_id: + t = T_INT; + aligned = true; + break; + default: + ShouldNotReachHere(); + break; + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -1257,13 +1375,84 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_conjoint_copy(BasicType t, bool aligned, - Address::ScaleFactor sf, + address generate_conjoint_copy(StubGenStubId stub_id, address nooverlap_target, - address* entry, const char *name, - bool dest_uninitialized = false) { + address* entry) { + BasicType t; + bool aligned; + Address::ScaleFactor sf; + bool dest_uninitialized; + + switch 
(stub_id) { + case jbyte_arraycopy_id: + t = T_BYTE; + aligned = false; + sf = Address::times_1; + dest_uninitialized = false; + break; + case arrayof_jbyte_arraycopy_id: + t = T_BYTE; + aligned = true; + sf = Address::times_1; + dest_uninitialized = false; + break; + case jshort_arraycopy_id: + t = T_SHORT; + aligned = false; + sf = Address::times_2; + dest_uninitialized = false; + break; + case arrayof_jshort_arraycopy_id: + t = T_SHORT; + aligned = true; + sf = Address::times_2; + dest_uninitialized = false; + break; + case jint_arraycopy_id: + t = T_INT; + aligned = true; + sf = Address::times_4; + dest_uninitialized = false; + break; + case arrayof_jint_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case jint_arraycopy + ShouldNotReachHere(); + break; + case jlong_arraycopy_id: + case arrayof_jlong_arraycopy_id: + // Handled by a special generator routine on 32 bit + ShouldNotReachHere(); + break; + case oop_arraycopy_id: + t = T_OBJECT; + aligned = true; + sf = Address::times_ptr; + dest_uninitialized = false; + break; + case arrayof_oop_arraycopy_id: + // since this is always aligned we can (should!) use the same + // stub as for case oop_arraycopy + ShouldNotReachHere(); + break; + case oop_arraycopy_uninit_id: + t = T_OBJECT; + aligned = true; + sf = Address::times_ptr; + dest_uninitialized = true; + break; + case arrayof_oop_arraycopy_uninit_id: + // since this is always aligned we can (should!) 
use the same + // stub as for case oop_arraycopy_uninit + ShouldNotReachHere(); + break; + default: + ShouldNotReachHere(); + break; + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte; @@ -1429,9 +1618,10 @@ class StubGenerator: public StubCodeGenerator { } - address generate_disjoint_long_copy(address* entry, const char *name) { + address generate_disjoint_long_copy(address* entry) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::jlong_disjoint_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_8_bytes, L_copy_8_bytes_loop; @@ -1474,10 +1664,10 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_conjoint_long_copy(address nooverlap_target, - address* entry, const char *name) { + address generate_conjoint_long_copy(address nooverlap_target, address* entry) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::jlong_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_8_bytes, L_copy_8_bytes_loop; @@ -1598,9 +1788,21 @@ class StubGenerator: public StubCodeGenerator { // rax, == 0 - success // rax, == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) { + address generate_checkcast_copy(StubGenStubId stub_id, address* entry) { + bool dest_uninitialized; + switch(stub_id) { + case checkcast_arraycopy_id: + dest_uninitialized = false; + break; + case checkcast_arraycopy_uninit_id: + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, 
stub_id); address start = __ pc(); Label L_load_element, L_store_element, L_do_card_marks, L_done; @@ -1755,8 +1957,7 @@ class StubGenerator: public StubCodeGenerator { // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char *name, - address byte_copy_entry, + address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { @@ -1764,7 +1965,8 @@ class StubGenerator: public StubCodeGenerator { Label L_long_aligned, L_int_aligned, L_short_aligned; __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = rax; // source array address @@ -1861,8 +2063,7 @@ class StubGenerator: public StubCodeGenerator { // rax, == 0 - success // rax, == -1^K - failure, where K is partial transfer count // - address generate_generic_copy(const char *name, - address entry_jbyte_arraycopy, + address generate_generic_copy(address entry_jbyte_arraycopy, address entry_jshort_arraycopy, address entry_jint_arraycopy, address entry_oop_arraycopy, @@ -1876,7 +2077,8 @@ class StubGenerator: public StubCodeGenerator { if (advance < 0) advance += modulus; if (advance > 0) __ nop(advance); } - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubCodeMark mark(this, stub_id); // Short-hop target to L_failed. Makes for denser prologue code. 
__ BIND(L_failed_0); @@ -2189,67 +2391,50 @@ class StubGenerator: public StubCodeGenerator { address entry_checkcast_arraycopy; StubRoutines::_arrayof_jbyte_disjoint_arraycopy = - generate_disjoint_copy(T_BYTE, true, Address::times_1, &entry, - "arrayof_jbyte_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id, &entry); StubRoutines::_arrayof_jbyte_arraycopy = - generate_conjoint_copy(T_BYTE, true, Address::times_1, entry, - nullptr, "arrayof_jbyte_arraycopy"); + generate_conjoint_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, entry, nullptr); StubRoutines::_jbyte_disjoint_arraycopy = - generate_disjoint_copy(T_BYTE, false, Address::times_1, &entry, - "jbyte_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::jbyte_disjoint_arraycopy_id, &entry); StubRoutines::_jbyte_arraycopy = - generate_conjoint_copy(T_BYTE, false, Address::times_1, entry, - &entry_jbyte_arraycopy, "jbyte_arraycopy"); + generate_conjoint_copy(StubGenStubId::jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); StubRoutines::_arrayof_jshort_disjoint_arraycopy = - generate_disjoint_copy(T_SHORT, true, Address::times_2, &entry, - "arrayof_jshort_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id, &entry); StubRoutines::_arrayof_jshort_arraycopy = - generate_conjoint_copy(T_SHORT, true, Address::times_2, entry, - nullptr, "arrayof_jshort_arraycopy"); + generate_conjoint_copy(StubGenStubId::arrayof_jshort_arraycopy_id, entry, nullptr); StubRoutines::_jshort_disjoint_arraycopy = - generate_disjoint_copy(T_SHORT, false, Address::times_2, &entry, - "jshort_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::jshort_disjoint_arraycopy_id, &entry); StubRoutines::_jshort_arraycopy = - generate_conjoint_copy(T_SHORT, false, Address::times_2, entry, - &entry_jshort_arraycopy, "jshort_arraycopy"); + generate_conjoint_copy(StubGenStubId::jshort_arraycopy_id, entry, &entry_jshort_arraycopy); // Next 
arrays are always aligned on 4 bytes at least. StubRoutines::_jint_disjoint_arraycopy = - generate_disjoint_copy(T_INT, true, Address::times_4, &entry, - "jint_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); StubRoutines::_jint_arraycopy = - generate_conjoint_copy(T_INT, true, Address::times_4, entry, - &entry_jint_arraycopy, "jint_arraycopy"); + generate_conjoint_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); StubRoutines::_oop_disjoint_arraycopy = - generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, - "oop_disjoint_arraycopy"); + generate_disjoint_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry); StubRoutines::_oop_arraycopy = - generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, - &entry_oop_arraycopy, "oop_arraycopy"); + generate_conjoint_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy); StubRoutines::_oop_disjoint_arraycopy_uninit = - generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, - "oop_disjoint_arraycopy_uninit", - /*dest_uninitialized*/true); + generate_disjoint_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry); StubRoutines::_oop_arraycopy_uninit = - generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, - nullptr, "oop_arraycopy_uninit", - /*dest_uninitialized*/true); + generate_conjoint_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr); StubRoutines::_jlong_disjoint_arraycopy = - generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); + generate_disjoint_long_copy(&entry); StubRoutines::_jlong_arraycopy = - generate_conjoint_long_copy(entry, &entry_jlong_arraycopy, - "jlong_arraycopy"); + generate_conjoint_long_copy(entry, &entry_jlong_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); - StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); - StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); - 
StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); - StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); - StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; @@ -2262,20 +2447,18 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; StubRoutines::_checkcast_arraycopy = - generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); + generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); StubRoutines::_checkcast_arraycopy_uninit = - generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, /*dest_uninitialized*/true); + generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); StubRoutines::_unsafe_arraycopy = - generate_unsafe_copy("unsafe_arraycopy", - entry_jbyte_arraycopy, - entry_jshort_arraycopy, - entry_jint_arraycopy, - entry_jlong_arraycopy); + generate_unsafe_copy(entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_jlong_arraycopy); StubRoutines::_generic_arraycopy = - generate_generic_copy("generic_arraycopy", - entry_jbyte_arraycopy, + generate_generic_copy( entry_jbyte_arraycopy, entry_jshort_arraycopy, 
entry_jint_arraycopy, entry_oop_arraycopy, @@ -2355,7 +2538,8 @@ class StubGenerator: public StubCodeGenerator { address generate_aescrypt_encryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_doLast; address start = __ pc(); @@ -2454,7 +2638,8 @@ class StubGenerator: public StubCodeGenerator { address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_doLast; address start = __ pc(); @@ -2578,7 +2763,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; @@ -2736,7 +2922,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = rsi; // source array address @@ -2909,7 +3096,8 @@ class StubGenerator: public StubCodeGenerator 
{ address generate_counterMode_AESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = rsi; // source array address const Register to = rdx; // destination array address @@ -3191,9 +3379,21 @@ class StubGenerator: public StubCodeGenerator { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs) - address generate_md5_implCompress(bool multi_block, const char *name) { + address generate_md5_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch(stub_id) { + case StubGenStubId::md5_implCompress_id: + multi_block = false; + break; + case StubGenStubId::md5_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register buf_param = rbp; @@ -3231,7 +3431,8 @@ class StubGenerator: public StubCodeGenerator { address generate_upper_word_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); + StubGenStubId stub_id = StubGenStubId::upper_word_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x00000000, relocInfo::none, 0); __ emit_data(0x00000000, relocInfo::none, 0); @@ -3242,7 +3443,8 @@ class StubGenerator: public StubCodeGenerator { address generate_shuffle_byte_flip_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); + StubGenStubId stub_id = StubGenStubId::shuffle_byte_flip_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x0c0d0e0f, relocInfo::none, 0); __ emit_data(0x08090a0b, relocInfo::none, 0); @@ 
-3253,9 +3455,21 @@ class StubGenerator: public StubCodeGenerator { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) - address generate_sha1_implCompress(bool multi_block, const char *name) { + address generate_sha1_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch(stub_id) { + case StubGenStubId::sha1_implCompress_id: + multi_block = false; + break; + case StubGenStubId::sha1_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = rax; @@ -3301,7 +3515,8 @@ class StubGenerator: public StubCodeGenerator { address generate_pshuffle_byte_flip_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); + StubGenStubId stub_id = StubGenStubId::pshuffle_byte_flip_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data(0x00010203, relocInfo::none, 0); __ emit_data(0x04050607, relocInfo::none, 0); @@ -3312,9 +3527,21 @@ class StubGenerator: public StubCodeGenerator { // ofs and limit are use for multi-block byte array. 
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) - address generate_sha256_implCompress(bool multi_block, const char *name) { + address generate_sha256_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch(stub_id) { + case StubGenStubId::sha256_implCompress_id: + multi_block = false; + break; + case StubGenStubId::sha256_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = rbx; @@ -3372,7 +3599,9 @@ class StubGenerator: public StubCodeGenerator { assert(UseGHASHIntrinsics, "need GHASH intrinsics and CLMUL support"); __ align(CodeEntryAlignment); Label L_ghash_loop, L_exit; - StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubCodeMark mark(this, stub_id); + address start = __ pc(); const Register state = rdi; @@ -3520,7 +3749,8 @@ class StubGenerator: public StubCodeGenerator { assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3575,7 +3805,9 @@ class StubGenerator: public StubCodeGenerator { address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { assert(UseCRC32CIntrinsics, "need SSE4_2"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubCodeMark mark(this, stub_id); + address start = __ pc(); const Register crc = rax; // crc const Register buf = rcx; // source java byte array address @@ -3618,7 +3850,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmExp() { 
- StubCodeMark mark(this, "StubRoutines", "libmExp"); + StubGenStubId stub_id = StubGenStubId::dexp_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3634,7 +3867,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmLog() { - StubCodeMark mark(this, "StubRoutines", "libmLog"); + StubGenStubId stub_id = StubGenStubId::dlog_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3650,7 +3884,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmLog10() { - StubCodeMark mark(this, "StubRoutines", "libmLog10"); + StubGenStubId stub_id = StubGenStubId::dlog10_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3666,7 +3901,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmPow() { - StubCodeMark mark(this, "StubRoutines", "libmPow"); + StubGenStubId stub_id = StubGenStubId::dpow_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3682,7 +3918,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libm_reduce_pi04l() { - StubCodeMark mark(this, "StubRoutines", "libm_reduce_pi04l"); + StubGenStubId stub_id = StubGenStubId::dlibm_reduce_pi04l_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3694,7 +3931,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libm_sin_cos_huge() { - StubCodeMark mark(this, "StubRoutines", "libm_sin_cos_huge"); + StubGenStubId stub_id = StubGenStubId::dlibm_sin_cos_huge_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3706,7 +3944,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmSin() { - StubCodeMark mark(this, "StubRoutines", "libmSin"); + StubGenStubId stub_id = StubGenStubId::dsin_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3722,7 +3961,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmCos() { - StubCodeMark mark(this, "StubRoutines", "libmCos"); + 
StubGenStubId stub_id = StubGenStubId::dcos_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3738,7 +3978,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libm_tan_cot_huge() { - StubCodeMark mark(this, "StubRoutines", "libm_tan_cot_huge"); + StubGenStubId stub_id = StubGenStubId::dlibm_tan_cot_huge_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3750,7 +3991,8 @@ class StubGenerator: public StubCodeGenerator { } address generate_libmTan() { - StubCodeMark mark(this, "StubRoutines", "libmTan"); + StubGenStubId stub_id = StubGenStubId::dtan_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3767,7 +4009,8 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -3987,35 +4230,35 @@ class StubGenerator: public StubCodeGenerator { // entry points that are C2/JVMCI specific - StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF); - StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x80000000); - StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask_long_double("vector_double_sign_mask", 0x7FFFFFFF, 0xFFFFFFFF); - StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask_long_double("vector_double_sign_flip", 0x80000000, 0x00000000); - StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff); - StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask("vector_int_to_byte_mask", 0x000000ff); - StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask("vector_int_to_short_mask", 0x0000ffff); - StubRoutines::x86::_vector_32_bit_mask = 
generate_vector_custom_i32("vector_32_bit_mask", Assembler::AVX_512bit, + StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubGenStubId::vector_float_sign_mask_id, 0x7FFFFFFF); + StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubGenStubId::vector_float_sign_flip_id, 0x80000000); + StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask_long_double(StubGenStubId::vector_double_sign_mask_id, 0x7FFFFFFF, 0xFFFFFFFF); + StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask_long_double(StubGenStubId::vector_double_sign_flip_id, 0x80000000, 0x00000000); + StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubGenStubId::vector_short_to_byte_mask_id, 0x00ff00ff); + StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubGenStubId::vector_int_to_byte_mask_id, 0x000000ff); + StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubGenStubId::vector_int_to_short_mask_id, 0x0000ffff); + StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_32_bit_mask_id, Assembler::AVX_512bit, 0xFFFFFFFF, 0, 0, 0); - StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit, + StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_64_bit_mask_id, Assembler::AVX_512bit, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0); - StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x03020100); - StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask"); - StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x01000100); - StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask_long_double("vector_long_shuffle_mask", 0x00000001, 0x0); - StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); - 
StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask_long_double("vector_long_sign_mask", 0x80000000, 0x00000000); - StubRoutines::x86::_vector_all_bits_set = generate_vector_mask("vector_all_bits_set", 0xFFFFFFFF); - StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask("vector_int_mask_cmp_bits", 0x00000001); - StubRoutines::x86::_vector_iota_indices = generate_iota_indices("iota_indices"); - StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut("count_leading_zeros_lut"); - StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut("reverse_bit_lut"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long("perm_mask_long"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int("perm_mask_int"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short("perm_mask_short"); + StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubGenStubId::vector_int_shuffle_mask_id, 0x03020100); + StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask(); + StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubGenStubId::vector_short_shuffle_mask_id, 0x01000100); + StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask_long_double(StubGenStubId::vector_long_shuffle_mask_id, 0x00000001, 0x0); + StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask(); + StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask_long_double(StubGenStubId::vector_long_sign_mask_id, 0x80000000, 0x00000000); + StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubGenStubId::vector_all_bits_set_id, 0xFFFFFFFF); + StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubGenStubId::vector_int_mask_cmp_bits_id, 0x00000001); + StubRoutines::x86::_vector_iota_indices = 
generate_iota_indices(); + StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut(); + StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short(); if (VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) { // lut implementation influenced by counting 1s algorithm from section 5-1 of Hackers' Delight. - StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut("popcount_lut"); + StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut(); } // don't bother generating these AES intrinsic stubs unless global flag is set @@ -4031,20 +4274,20 @@ class StubGenerator: public StubCodeGenerator { } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress"); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB"); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); + StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); } if (UseSHA1Intrinsics) { StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); } if 
(UseSHA256Intrinsics) { StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(); - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); } // Generate GHASH intrinsics code @@ -4056,27 +4299,27 @@ class StubGenerator: public StubCodeGenerator { public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - switch(kind) { - case Initial_stubs: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); break; - case Continuation_stubs: + case continuation_id: generate_continuation_stubs(); break; - case Compiler_stubs: + case compiler_id: generate_compiler_stubs(); break; - case Final_stubs: + case final_id: generate_final_stubs(); break; default: - fatal("unexpected stubs kind: %d", kind); + fatal("unexpected blob id: %d", blob_id); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 38a8eb6981f5c..fdc53a8657665 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -187,7 +187,8 @@ address StubGenerator::generate_call_stub(address& return_address) { assert((int)frame::entry_frame_after_call_words == 
-(int)rsp_after_call_off + 1 && (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubCodeMark mark(this, "StubRoutines", "call_stub"); + StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // same as in generate_catch_exception()! @@ -412,7 +413,8 @@ address StubGenerator::generate_call_stub(address& return_address) { // rax: exception oop address StubGenerator::generate_catch_exception() { - StubCodeMark mark(this, "StubRoutines", "catch_exception"); + StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // same as in generate_call_stub(): @@ -467,7 +469,8 @@ address StubGenerator::generate_catch_exception() { // NOTE: At entry of this stub, exception-pc must be on stack !! address StubGenerator::generate_forward_exception() { - StubCodeMark mark(this, "StubRoutines", "forward exception"); + StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Upon entry, the sp points to the return address returning into @@ -530,7 +533,8 @@ address StubGenerator::generate_forward_exception() { // // Result: address StubGenerator::generate_orderaccess_fence() { - StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); + StubGenStubId stub_id = StubGenStubId::fence_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ membar(Assembler::StoreLoad); @@ -545,7 +549,8 @@ address StubGenerator::generate_orderaccess_fence() { // This routine is used to find the previous stack pointer for the // caller. 
address StubGenerator::generate_get_previous_sp() { - StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); + StubGenStubId stub_id = StubGenStubId::get_previous_sp_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ movptr(rax, rsp); @@ -563,7 +568,8 @@ address StubGenerator::generate_get_previous_sp() { // MXCSR register to our expected state. address StubGenerator::generate_verify_mxcsr() { - StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); + StubGenStubId stub_id = StubGenStubId::verify_mxcsr_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Address mxcsr_save(rsp, 0); @@ -594,7 +600,8 @@ address StubGenerator::generate_verify_mxcsr() { } address StubGenerator::generate_f2i_fixup() { - StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); + StubGenStubId stub_id = StubGenStubId::f2i_fixup_id; + StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves address start = __ pc(); @@ -632,7 +639,8 @@ address StubGenerator::generate_f2i_fixup() { } address StubGenerator::generate_f2l_fixup() { - StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); + StubGenStubId stub_id = StubGenStubId::f2l_fixup_id; + StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves address start = __ pc(); @@ -669,7 +677,8 @@ address StubGenerator::generate_f2l_fixup() { } address StubGenerator::generate_d2i_fixup() { - StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); + StubGenStubId stub_id = StubGenStubId::d2i_fixup_id; + StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 saves address start = __ pc(); @@ -716,7 +725,8 @@ address StubGenerator::generate_d2i_fixup() { } address StubGenerator::generate_d2l_fixup() { - StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); + StubGenStubId stub_id = StubGenStubId::d2l_fixup_id; + StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 
saves address start = __ pc(); @@ -762,9 +772,10 @@ address StubGenerator::generate_d2l_fixup() { return start; } -address StubGenerator::generate_count_leading_zeros_lut(const char *stub_name) { +address StubGenerator::generate_count_leading_zeros_lut() { __ align64(); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_count_leading_zeros_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0101010102020304, relocInfo::none); @@ -779,9 +790,10 @@ address StubGenerator::generate_count_leading_zeros_lut(const char *stub_name) { return start; } -address StubGenerator::generate_popcount_avx_lut(const char *stub_name) { +address StubGenerator::generate_popcount_avx_lut() { __ align64(); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_popcount_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0302020102010100, relocInfo::none); @@ -796,9 +808,10 @@ address StubGenerator::generate_popcount_avx_lut(const char *stub_name) { return start; } -address StubGenerator::generate_iota_indices(const char *stub_name) { +address StubGenerator::generate_iota_indices() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_iota_indices_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // B __ emit_data64(0x0706050403020100, relocInfo::none); @@ -857,9 +870,10 @@ address StubGenerator::generate_iota_indices(const char *stub_name) { return start; } -address StubGenerator::generate_vector_reverse_bit_lut(const char *stub_name) { +address StubGenerator::generate_vector_reverse_bit_lut() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_bit_lut_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ 
emit_data64(0x0E060A020C040800, relocInfo::none); @@ -874,9 +888,10 @@ address StubGenerator::generate_vector_reverse_bit_lut(const char *stub_name) { return start; } -address StubGenerator::generate_vector_reverse_byte_perm_mask_long(const char *stub_name) { +address StubGenerator::generate_vector_reverse_byte_perm_mask_long() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_long_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0001020304050607, relocInfo::none); @@ -891,9 +906,10 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_long(const char *s return start; } -address StubGenerator::generate_vector_reverse_byte_perm_mask_int(const char *stub_name) { +address StubGenerator::generate_vector_reverse_byte_perm_mask_int() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_int_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0405060700010203, relocInfo::none); @@ -908,9 +924,10 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_int(const char *st return start; } -address StubGenerator::generate_vector_reverse_byte_perm_mask_short(const char *stub_name) { +address StubGenerator::generate_vector_reverse_byte_perm_mask_short() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_short_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0607040502030001, relocInfo::none); @@ -925,9 +942,10 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_short(const char * return start; } -address StubGenerator::generate_vector_byte_shuffle_mask(const char *stub_name) { +address StubGenerator::generate_vector_byte_shuffle_mask() { __ 
align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_byte_shuffle_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x7070707070707070, relocInfo::none); @@ -938,9 +956,9 @@ address StubGenerator::generate_vector_byte_shuffle_mask(const char *stub_name) return start; } -address StubGenerator::generate_fp_mask(const char *stub_name, int64_t mask) { +address StubGenerator::generate_fp_mask(StubGenStubId stub_id, int64_t mask) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64( mask, relocInfo::none ); @@ -949,9 +967,20 @@ address StubGenerator::generate_fp_mask(const char *stub_name, int64_t mask) { return start; } -address StubGenerator::generate_compress_perm_table(const char *stub_name, int32_t esize) { +address StubGenerator::generate_compress_perm_table(StubGenStubId stub_id) { + int esize; + switch (stub_id) { + case compress_perm_table32_id: + esize = 32; + break; + case compress_perm_table64_id: + esize = 64; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); if (esize == 32) { // Loop to generate 256 x 8 int compression permute index table. 
A row is @@ -993,9 +1022,20 @@ address StubGenerator::generate_compress_perm_table(const char *stub_name, int32 return start; } -address StubGenerator::generate_expand_perm_table(const char *stub_name, int32_t esize) { +address StubGenerator::generate_expand_perm_table(StubGenStubId stub_id) { + int esize; + switch (stub_id) { + case expand_perm_table32_id: + esize = 32; + break; + case expand_perm_table64_id: + esize = 64; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); if (esize == 32) { // Loop to generate 256 x 8 int expand permute index table. A row is accessed @@ -1035,9 +1075,9 @@ address StubGenerator::generate_expand_perm_table(const char *stub_name, int32_t return start; } -address StubGenerator::generate_vector_mask(const char *stub_name, int64_t mask) { +address StubGenerator::generate_vector_mask(StubGenStubId stub_id, int64_t mask) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(mask, relocInfo::none); @@ -1052,9 +1092,10 @@ address StubGenerator::generate_vector_mask(const char *stub_name, int64_t mask) return start; } -address StubGenerator::generate_vector_byte_perm_mask(const char *stub_name) { +address StubGenerator::generate_vector_byte_perm_mask() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubGenStubId stub_id = StubGenStubId::vector_byte_perm_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0000000000000001, relocInfo::none); @@ -1069,9 +1110,9 @@ address StubGenerator::generate_vector_byte_perm_mask(const char *stub_name) { return start; } -address StubGenerator::generate_vector_fp_mask(const char *stub_name, int64_t mask) { +address StubGenerator::generate_vector_fp_mask(StubGenStubId stub_id, int64_t 
mask) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(mask, relocInfo::none); @@ -1086,13 +1127,13 @@ address StubGenerator::generate_vector_fp_mask(const char *stub_name, int64_t ma return start; } -address StubGenerator::generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len, +address StubGenerator::generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len, int32_t val0, int32_t val1, int32_t val2, int32_t val3, int32_t val4, int32_t val5, int32_t val6, int32_t val7, int32_t val8, int32_t val9, int32_t val10, int32_t val11, int32_t val12, int32_t val13, int32_t val14, int32_t val15) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", stub_name); + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(len != Assembler::AVX_NoVec, "vector len must be specified"); @@ -1136,7 +1177,8 @@ address StubGenerator::generate_vector_custom_i32(const char *stub_name, Assembl // * [tos + 8]: saved r10 (rscratch1) - saved by caller // * = popped on exit address StubGenerator::generate_verify_oop() { - StubCodeMark mark(this, "StubRoutines", "verify_oop"); + StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label exit, error; @@ -1333,7 +1375,8 @@ address StubGenerator::generate_data_cache_writeback() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1350,7 +1393,8 @@ address StubGenerator::generate_data_cache_writeback_sync() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); + StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubCodeMark mark(this, stub_id); // pre wbsync 
is a no-op // post wbsync translates to an sfence @@ -1371,9 +1415,20 @@ address StubGenerator::generate_data_cache_writeback_sync() { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs) -address StubGenerator::generate_md5_implCompress(bool multi_block, const char *name) { +address StubGenerator::generate_md5_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case md5_implCompress_id: + multi_block = false; + break; + case md5_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register buf_param = r15; @@ -1409,7 +1464,8 @@ address StubGenerator::generate_md5_implCompress(bool multi_block, const char *n address StubGenerator::generate_upper_word_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); + StubGenStubId stub_id = StubGenStubId::upper_word_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0000000000000000, relocInfo::none); @@ -1420,7 +1476,8 @@ address StubGenerator::generate_upper_word_mask() { address StubGenerator::generate_shuffle_byte_flip_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); + StubGenStubId stub_id = StubGenStubId::shuffle_byte_flip_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); @@ -1431,9 +1488,20 @@ address StubGenerator::generate_shuffle_byte_flip_mask() { // ofs and limit are use for multi-block byte array. 
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) -address StubGenerator::generate_sha1_implCompress(bool multi_block, const char *name) { +address StubGenerator::generate_sha1_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha1_implCompress_id: + multi_block = false; + break; + case sha1_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -1468,7 +1536,8 @@ address StubGenerator::generate_sha1_implCompress(bool multi_block, const char * address StubGenerator::generate_pshuffle_byte_flip_mask() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); + StubGenStubId stub_id = StubGenStubId::pshuffle_byte_flip_mask_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0405060700010203, relocInfo::none); @@ -1495,7 +1564,8 @@ address StubGenerator::generate_pshuffle_byte_flip_mask() { //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. address StubGenerator::generate_pshuffle_byte_flip_mask_sha512() { __ align32(); - StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512"); + StubGenStubId stub_id = StubGenStubId::pshuffle_byte_flip_mask_sha512_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); if (VM_Version::supports_avx2()) { @@ -1514,10 +1584,21 @@ address StubGenerator::generate_pshuffle_byte_flip_mask_sha512() { // ofs and limit are use for multi-block byte array. 
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) -address StubGenerator::generate_sha256_implCompress(bool multi_block, const char *name) { +address StubGenerator::generate_sha256_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha256_implCompress_id: + multi_block = false; + break; + case sha256_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -1556,11 +1637,22 @@ address StubGenerator::generate_sha256_implCompress(bool multi_block, const char return start; } -address StubGenerator::generate_sha512_implCompress(bool multi_block, const char *name) { +address StubGenerator::generate_sha512_implCompress(StubGenStubId stub_id) { + bool multi_block; + switch (stub_id) { + case sha512_implCompress_id: + multi_block = false; + break; + case sha512_implCompressMB_id: + multi_block = true; + break; + default: + ShouldNotReachHere(); + } assert(VM_Version::supports_avx2(), ""); assert(VM_Version::supports_bmi2() || VM_Version::supports_sha512(), ""); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Register buf = c_rarg0; @@ -1595,7 +1687,8 @@ address StubGenerator::generate_sha512_implCompress(bool multi_block, const char address StubGenerator::base64_shuffle_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "shuffle_base64"); + StubGenStubId stub_id = StubGenStubId::shuffle_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -1614,7 +1707,8 @@ address StubGenerator::base64_shuffle_addr() { address StubGenerator::base64_avx2_shuffle_addr() { 
__ align32(); - StubCodeMark mark(this, "StubRoutines", "avx2_shuffle_base64"); + StubGenStubId stub_id = StubGenStubId::avx2_shuffle_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x0809070805060405, relocInfo::none); @@ -1627,7 +1721,8 @@ address StubGenerator::base64_avx2_shuffle_addr() { address StubGenerator::base64_avx2_input_mask_addr() { __ align32(); - StubCodeMark mark(this, "StubRoutines", "avx2_input_mask_base64"); + StubGenStubId stub_id = StubGenStubId::avx2_input_mask_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0x8000000000000000, relocInfo::none); @@ -1640,7 +1735,8 @@ address StubGenerator::base64_avx2_input_mask_addr() { address StubGenerator::base64_avx2_lut_addr() { __ align32(); - StubCodeMark mark(this, "StubRoutines", "avx2_lut_base64"); + StubGenStubId stub_id = StubGenStubId::avx2_lut_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none); @@ -1659,7 +1755,8 @@ address StubGenerator::base64_avx2_lut_addr() { address StubGenerator::base64_encoding_table_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "encoding_table_base64"); + StubGenStubId stub_id = StubGenStubId::encoding_table_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, "Alignment problem (0x%08llx)", (unsigned long long)start); @@ -1692,7 +1789,8 @@ address StubGenerator::base64_encoding_table_addr() { address StubGenerator::generate_base64_encodeBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "implEncode"); + StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -2074,7 +2172,8 @@ address StubGenerator::generate_base64_encodeBlock() // base64 AVX512vbmi tables address StubGenerator::base64_vbmi_lookup_lo_addr() { __ align64(); - 
StubCodeMark mark(this, "StubRoutines", "lookup_lo_base64"); + StubGenStubId stub_id = StubGenStubId::lookup_lo_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2093,7 +2192,8 @@ address StubGenerator::base64_vbmi_lookup_lo_addr() { address StubGenerator::base64_vbmi_lookup_hi_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "lookup_hi_base64"); + StubGenStubId stub_id = StubGenStubId::lookup_hi_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2111,7 +2211,8 @@ address StubGenerator::base64_vbmi_lookup_hi_addr() { } address StubGenerator::base64_vbmi_lookup_lo_url_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "lookup_lo_base64url"); + StubGenStubId stub_id = StubGenStubId::lookup_lo_base64url_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2130,7 +2231,8 @@ address StubGenerator::base64_vbmi_lookup_lo_url_addr() { address StubGenerator::base64_vbmi_lookup_hi_url_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "lookup_hi_base64url"); + StubGenStubId stub_id = StubGenStubId::lookup_hi_base64url_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2149,7 +2251,8 @@ address StubGenerator::base64_vbmi_lookup_hi_url_addr() { address StubGenerator::base64_vbmi_pack_vec_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "pack_vec_base64"); + StubGenStubId stub_id = StubGenStubId::pack_vec_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2168,7 +2271,8 @@ address StubGenerator::base64_vbmi_pack_vec_addr() { address StubGenerator::base64_vbmi_join_0_1_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "join_0_1_base64"); + StubGenStubId stub_id 
= StubGenStubId::join_0_1_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2187,7 +2291,8 @@ address StubGenerator::base64_vbmi_join_0_1_addr() { address StubGenerator::base64_vbmi_join_1_2_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "join_1_2_base64"); + StubGenStubId stub_id = StubGenStubId::join_1_2_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2206,7 +2311,8 @@ address StubGenerator::base64_vbmi_join_1_2_addr() { address StubGenerator::base64_vbmi_join_2_3_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "join_2_3_base64"); + StubGenStubId stub_id = StubGenStubId::join_2_3_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2225,7 +2331,8 @@ address StubGenerator::base64_vbmi_join_2_3_addr() { address StubGenerator::base64_AVX2_decode_tables_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "AVX2_tables_base64"); + StubGenStubId stub_id = StubGenStubId::avx2_decode_tables_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2259,7 +2366,8 @@ address StubGenerator::base64_AVX2_decode_tables_addr() { address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { __ align64(); - StubCodeMark mark(this, "StubRoutines", "AVX2_tables_URL_base64"); + StubGenStubId stub_id = StubGenStubId::avx2_decode_lut_tables_base64_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); assert(((unsigned long long)start & 0x3f) == 0, @@ -2298,7 +2406,8 @@ address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { } address StubGenerator::base64_decoding_table_addr() { - StubCodeMark mark(this, "StubRoutines", "decoding_table_base64"); + StubGenStubId stub_id = StubGenStubId::decoding_table_base64_id; + StubCodeMark mark(this, 
stub_id); address start = __ pc(); __ emit_data64(0xffffffffffffffff, relocInfo::none); @@ -2380,7 +2489,8 @@ address StubGenerator::base64_decoding_table_addr() { // private void decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, isMIME) { address StubGenerator::generate_base64_decodeBlock() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "implDecode"); + StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -2913,7 +3023,8 @@ address StubGenerator::generate_updateBytesCRC32() { assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2969,7 +3080,8 @@ address StubGenerator::generate_updateBytesCRC32() { address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { assert(UseCRC32CIntrinsics, "need SSE4_2"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); + StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs @@ -3049,7 +3161,8 @@ address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { */ address StubGenerator::generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); + StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 
@@ -3105,7 +3218,8 @@ address StubGenerator::generate_multiplyToLen() { */ address StubGenerator::generate_vectorizedMismatch() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); + StubGenStubId stub_id = StubGenStubId::vectorizedMismatch_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -3156,7 +3270,8 @@ address StubGenerator::generate_vectorizedMismatch() { address StubGenerator::generate_squareToLen() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "squareToLen"); + StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) @@ -3191,7 +3306,8 @@ address StubGenerator::generate_squareToLen() { address StubGenerator::generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); + StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label deoptimize_label; @@ -3280,7 +3396,8 @@ address StubGenerator::generate_method_entry_barrier() { */ address StubGenerator::generate_mulAdd() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "mulAdd"); + StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 
@@ -3321,7 +3438,8 @@ address StubGenerator::generate_mulAdd() { address StubGenerator::generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; @@ -3456,7 +3574,8 @@ address StubGenerator::generate_bigIntegerRightShift() { */ address StubGenerator::generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); + StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; @@ -3604,7 +3723,8 @@ void StubGenerator::generate_libm_stubs() { * xmm0 - float */ address StubGenerator::generate_float16ToFloat() { - StubCodeMark mark(this, "StubRoutines", "float16ToFloat"); + StubGenStubId stub_id = StubGenStubId::hf2f_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3629,7 +3749,8 @@ address StubGenerator::generate_float16ToFloat() { * rax - float16 jshort */ address StubGenerator::generate_floatToFloat16() { - StubCodeMark mark(this, "StubRoutines", "floatToFloat16"); + StubGenStubId stub_id = StubGenStubId::f2hf_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3644,13 +3765,33 @@ address StubGenerator::generate_floatToFloat16() { return start; } -address StubGenerator::generate_cont_thaw(const char* label, Continuation::thaw_kind kind) { +address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) { if (!Continuations::enabled()) return nullptr; - bool return_barrier = Continuation::is_thaw_return_barrier(kind); - bool return_barrier_exception = Continuation::is_thaw_return_barrier_exception(kind); - - StubCodeMark mark(this, "StubRoutines", label); + bool 
return_barrier; + bool return_barrier_exception; + Continuation::thaw_kind kind; + + switch (stub_id) { + case cont_thaw_id: + return_barrier = false; + return_barrier_exception = false; + kind = Continuation::thaw_top; + break; + case cont_returnBarrier_id: + return_barrier = true; + return_barrier_exception = false; + kind = Continuation::thaw_return_barrier; + break; + case cont_returnBarrierExc_id: + return_barrier = true; + return_barrier_exception = true; + kind = Continuation::thaw_return_barrier_exception; + break; + default: + ShouldNotReachHere(); + } + StubCodeMark mark(this, stub_id); address start = __ pc(); // TODO: Handle Valhalla return types. May require generating different return barriers. @@ -3768,22 +3909,23 @@ address StubGenerator::generate_cont_thaw(const char* label, Continuation::thaw_ } address StubGenerator::generate_cont_thaw() { - return generate_cont_thaw("Cont thaw", Continuation::thaw_top); + return generate_cont_thaw(StubGenStubId::cont_thaw_id); } // TODO: will probably need multiple return barriers depending on return type address StubGenerator::generate_cont_returnBarrier() { - return generate_cont_thaw("Cont thaw return barrier", Continuation::thaw_return_barrier); + return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); } address StubGenerator::generate_cont_returnBarrier_exception() { - return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception); + return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); } address StubGenerator::generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubCodeMark mark(this, "StubRoutines","Continuation preempt stub"); + StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ reset_last_Java_frame(true); @@ -3813,7 +3955,8 @@ address StubGenerator::generate_cont_preempt_stub() { // exception handler for upcall stubs address 
StubGenerator::generate_upcall_stub_exception_handler() { - StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // native caller has no idea how to handle exceptions @@ -3833,7 +3976,8 @@ address StubGenerator::generate_upcall_stub_exception_handler() { // j_rarg0 = jobject receiver // rbx = result address StubGenerator::generate_upcall_stub_load_target() { - StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target"); + StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ resolve_global_jobject(j_rarg0, r15_thread, rscratch1); @@ -3851,28 +3995,29 @@ address StubGenerator::generate_upcall_stub_load_target() { return start; } -address StubGenerator::generate_lookup_secondary_supers_table_stub(u1 super_klass_index) { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); - - address start = __ pc(); +void StubGenerator::generate_lookup_secondary_supers_table_stub() { + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubCodeMark mark(this, stub_id); const Register r_super_klass = rax, r_sub_klass = rsi, result = rdi; - __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, - rdx, rcx, rbx, r11, // temps - result, - super_klass_index); - __ ret(0); - - return start; + for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { + StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc(); + __ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass, + rdx, rcx, rbx, r11, // temps + result, + slot); + __ ret(0); + } } // Slow path implementation for UseSecondarySupersTable. 
address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() { - StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table"); + StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3945,10 +4090,10 @@ void StubGenerator::generate_initial_stubs() { StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); - StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); - StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); - StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); - StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); + StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubGenStubId::float_sign_mask_id, 0x7FFFFFFF7FFFFFFF); + StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubGenStubId::float_sign_flip_id, 0x8000000080000000); + StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubGenStubId::double_sign_mask_id, 0x7FFFFFFFFFFFFFFF); + StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubGenStubId::double_sign_flip_id, 0x8000000000000000); if (UseCRC32Intrinsics) { // set table address before stub generation which use it @@ -3994,10 +4139,6 @@ void StubGenerator::generate_final_stubs() { StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); } - // data cache line writeback - StubRoutines::_data_cache_writeback = generate_data_cache_writeback(); - StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync(); - // arraycopy stubs used by compilers generate_arraycopy_stubs(); @@ -4006,6 +4147,15 @@ void StubGenerator::generate_final_stubs() { StubRoutines::_method_entry_barrier = generate_method_entry_barrier(); } +#ifdef COMPILER2 + if (UseSecondarySupersTable) { + 
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); + if (! InlineSecondarySupersTest) { + generate_lookup_secondary_supers_table_stub(); + } + } +#endif // COMPILER2 + if (UseVectorizedMismatchIntrinsic) { StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch(); } @@ -4019,42 +4169,42 @@ void StubGenerator::generate_compiler_stubs() { // Entry points that are C2 compiler specific. - StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF); - StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000); - StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF); - StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000); - StubRoutines::x86::_vector_all_bits_set = generate_vector_mask("vector_all_bits_set", 0xFFFFFFFFFFFFFFFF); - StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask("vector_int_mask_cmp_bits", 0x0000000100000001); - StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff); - StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); - StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask("vector_int_to_byte_mask", 0x000000ff000000ff); - StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask("vector_int_to_short_mask", 0x0000ffff0000ffff); - StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32("vector_32_bit_mask", Assembler::AVX_512bit, + StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubGenStubId::vector_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF); + StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubGenStubId::vector_float_sign_flip_id, 0x8000000080000000); + 
StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask(StubGenStubId::vector_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF); + StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask(StubGenStubId::vector_double_sign_flip_id, 0x8000000000000000); + StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubGenStubId::vector_all_bits_set_id, 0xFFFFFFFFFFFFFFFF); + StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubGenStubId::vector_int_mask_cmp_bits_id, 0x0000000100000001); + StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubGenStubId::vector_short_to_byte_mask_id, 0x00ff00ff00ff00ff); + StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask(); + StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubGenStubId::vector_int_to_byte_mask_id, 0x000000ff000000ff); + StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubGenStubId::vector_int_to_short_mask_id, 0x0000ffff0000ffff); + StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_32_bit_mask_id, Assembler::AVX_512bit, 0xFFFFFFFF, 0, 0, 0); - StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit, + StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_64_bit_mask_id, Assembler::AVX_512bit, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0); - StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask("vector_int_shuffle_mask", 0x0302010003020100); - StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask("vector_byte_shuffle_mask"); - StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask("vector_short_shuffle_mask", 0x0100010001000100); - StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask("vector_long_shuffle_mask", 0x0000000100000000); - StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 
0x8000000000000000); - StubRoutines::x86::_vector_iota_indices = generate_iota_indices("iota_indices"); - StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut("count_leading_zeros_lut"); - StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut("reverse_bit_lut"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long("perm_mask_long"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int("perm_mask_int"); - StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short("perm_mask_short"); + StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubGenStubId::vector_int_shuffle_mask_id, 0x0302010003020100); + StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask(); + StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubGenStubId::vector_short_shuffle_mask_id, 0x0100010001000100); + StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubGenStubId::vector_long_shuffle_mask_id, 0x0000000100000000); + StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubGenStubId::vector_long_sign_mask_id, 0x8000000000000000); + StubRoutines::x86::_vector_iota_indices = generate_iota_indices(); + StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut(); + StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_long = generate_vector_reverse_byte_perm_mask_long(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_int = generate_vector_reverse_byte_perm_mask_int(); + StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short(); if (VM_Version::supports_avx2() && !VM_Version::supports_avx512vl()) { - StubRoutines::x86::_compress_perm_table32 = 
generate_compress_perm_table("compress_perm_table32", 32); - StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table("compress_perm_table64", 64); - StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table("expand_perm_table32", 32); - StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table("expand_perm_table64", 64); + StubRoutines::x86::_compress_perm_table32 = generate_compress_perm_table(StubGenStubId::compress_perm_table32_id); + StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table(StubGenStubId::compress_perm_table64_id); + StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table(StubGenStubId::expand_perm_table32_id); + StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table(StubGenStubId::expand_perm_table64_id); } if (VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) { // lut implementation influenced by counting 1s algorithm from section 5-1 of Hackers' Delight. - StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut("popcount_lut"); + StubRoutines::x86::_vector_popcount_lut = generate_popcount_avx_lut(); } generate_aes_stubs(); @@ -4065,6 +4215,10 @@ void StubGenerator::generate_compiler_stubs() { generate_sha3_stubs(); + // data cache line writeback + StubRoutines::_data_cache_writeback = generate_data_cache_writeback(); + StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync(); + #ifdef COMPILER2 if ((UseAVX == 2) && EnableX86ECoreOpts) { generate_string_indexof(StubRoutines::_string_indexof_array); @@ -4085,15 +4239,15 @@ void StubGenerator::generate_compiler_stubs() { } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(false, "md5_implCompress"); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(true, "md5_implCompressMB"); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); + StubRoutines::_md5_implCompressMB 
= generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); } if (UseSHA1Intrinsics) { StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); } if (UseSHA256Intrinsics) { @@ -4106,15 +4260,15 @@ void StubGenerator::generate_compiler_stubs() { } StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W; StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(); - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); - StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); - StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); } if (UseBASE64Intrinsics) { @@ -4156,14 +4310,6 
@@ void StubGenerator::generate_compiler_stubs() { StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift(); StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift(); } - if (UseSecondarySupersTable) { - StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub(); - if (! InlineSecondarySupersTest) { - for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) { - StubRoutines::_lookup_secondary_supers_table_stubs[slot] = generate_lookup_secondary_supers_table_stub(slot); - } - } - } if (UseMontgomeryMultiplyIntrinsic) { StubRoutines::_montgomeryMultiply = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); @@ -4262,29 +4408,28 @@ void StubGenerator::generate_compiler_stubs() { #endif // COMPILER2_OR_JVMCI } -StubGenerator::StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - DEBUG_ONLY( _regs_in_thread = false; ) - switch(kind) { - case Initial_stubs: - generate_initial_stubs(); - break; - case Continuation_stubs: - generate_continuation_stubs(); - break; - case Compiler_stubs: - generate_compiler_stubs(); - break; - case Final_stubs: - generate_final_stubs(); - break; - default: - fatal("unexpected stubs kind: %d", kind); - break; - }; +StubGenerator::StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: + generate_initial_stubs(); + break; + case continuation_id: + generate_continuation_stubs(); + break; + case compiler_id: + generate_compiler_stubs(); + break; + case final_id: + generate_final_stubs(); + break; + default: + fatal("unexpected blob id: %d", blob_id); + break; + }; } -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } #undef __ diff --git 
a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp index f883b6453a690..2263188216c41 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,7 @@ #include "code/codeBlob.hpp" #include "runtime/continuation.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/stubRoutines.hpp" // Stub Code definitions @@ -87,29 +88,29 @@ class StubGenerator: public StubCodeGenerator { address generate_d2i_fixup(); address generate_d2l_fixup(); - address generate_count_leading_zeros_lut(const char *stub_name); - address generate_popcount_avx_lut(const char *stub_name); - address generate_iota_indices(const char *stub_name); - address generate_vector_reverse_bit_lut(const char *stub_name); + address generate_count_leading_zeros_lut(); + address generate_popcount_avx_lut(); + address generate_iota_indices(); + address generate_vector_reverse_bit_lut(); - address generate_vector_reverse_byte_perm_mask_long(const char *stub_name); - address generate_vector_reverse_byte_perm_mask_int(const char *stub_name); - address generate_vector_reverse_byte_perm_mask_short(const char *stub_name); - address generate_vector_byte_shuffle_mask(const char *stub_name); + address generate_vector_reverse_byte_perm_mask_long(); + address generate_vector_reverse_byte_perm_mask_int(); + address generate_vector_reverse_byte_perm_mask_short(); + address generate_vector_byte_shuffle_mask(); - address generate_fp_mask(const char *stub_name, int64_t mask); + address generate_fp_mask(StubGenStubId stub_id, int64_t mask); - address generate_compress_perm_table(const char *stub_name, 
int32_t esize); + address generate_compress_perm_table(StubGenStubId stub_id); - address generate_expand_perm_table(const char *stub_name, int32_t esize); + address generate_expand_perm_table(StubGenStubId stub_id); - address generate_vector_mask(const char *stub_name, int64_t mask); + address generate_vector_mask(StubGenStubId stub_id, int64_t mask); - address generate_vector_byte_perm_mask(const char *stub_name); + address generate_vector_byte_perm_mask(); - address generate_vector_fp_mask(const char *stub_name, int64_t mask); + address generate_vector_fp_mask(StubGenStubId stub_id, int64_t mask); - address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len, + address generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len, int32_t val0, int32_t val1, int32_t val2, int32_t val3, int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0, int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0, @@ -179,12 +180,10 @@ class StubGenerator: public StubCodeGenerator { // - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over // 64 byte vector registers (ZMMs). 
- address generate_disjoint_copy_avx3_masked(address* entry, const char *name, int shift, - bool aligned, bool is_oop, bool dest_uninitialized); + address generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry); - address generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift, - address nooverlap_target, bool aligned, bool is_oop, - bool dest_uninitialized); + address generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry, + address nooverlap_target); void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from, Register to, Register count, int shift, @@ -225,27 +224,21 @@ class StubGenerator: public StubCodeGenerator { Register temp, int shift = Address::times_1, int offset = 0); #endif // COMPILER2_OR_JVMCI - address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name); + address generate_disjoint_byte_copy(address* entry); - address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, - address* entry, const char *name); + address generate_conjoint_byte_copy(address nooverlap_target, address* entry); - address generate_disjoint_short_copy(bool aligned, address *entry, const char *name); + address generate_disjoint_short_copy(address *entry); - address generate_fill(BasicType t, bool aligned, const char *name); + address generate_fill(StubGenStubId stub_id); - address generate_conjoint_short_copy(bool aligned, address nooverlap_target, - address *entry, const char *name); - address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, - const char *name, bool dest_uninitialized = false); - address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, - address *entry, const char *name, - bool dest_uninitialized = false); - address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, - const char *name, bool dest_uninitialized = false); - address 
generate_conjoint_long_oop_copy(bool aligned, bool is_oop, - address nooverlap_target, address *entry, - const char *name, bool dest_uninitialized = false); + address generate_conjoint_short_copy(address nooverlap_target, address *entry); + address generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry); + address generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target, + address *entry); + address generate_disjoint_long_oop_copy(StubGenStubId stub_id, address* entry); + address generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target, + address *entry); // Helper for generating a dynamic type check. // Smashes no registers. @@ -255,8 +248,7 @@ class StubGenerator: public StubCodeGenerator { Label& L_success); // Generate checkcasting array copy stub - address generate_checkcast_copy(const char *name, address *entry, - bool dest_uninitialized = false); + address generate_checkcast_copy(StubGenStubId stub_id, address *entry); // Generate 'unsafe' array copy stub // Though just as safe as the other stubs, it takes an unscaled @@ -264,8 +256,7 @@ class StubGenerator: public StubCodeGenerator { // // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. - address generate_unsafe_copy(const char *name, - address byte_copy_entry, address short_copy_entry, + address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry); // Generate 'unsafe' set memory stub @@ -274,7 +265,7 @@ class StubGenerator: public StubCodeGenerator { // // Examines the alignment of the operands and dispatches // to an int, short, or byte copy loop. - address generate_unsafe_setmemory(const char *name, address byte_copy_entry); + address generate_unsafe_setmemory(address byte_copy_entry); // Perform range checks on the proposed arraycopy. // Kills temp, but nothing else. 
@@ -288,8 +279,7 @@ class StubGenerator: public StubCodeGenerator { Label& L_failed); // Generate generic array copy stubs - address generate_generic_copy(const char *name, - address byte_copy_entry, address short_copy_entry, + address generate_generic_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry); @@ -304,19 +294,19 @@ class StubGenerator: public StubCodeGenerator { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs) - address generate_md5_implCompress(bool multi_block, const char *name); + address generate_md5_implCompress(StubGenStubId stub_id); // SHA stubs // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) - address generate_sha1_implCompress(bool multi_block, const char *name); + address generate_sha1_implCompress(StubGenStubId stub_id); // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) - address generate_sha256_implCompress(bool multi_block, const char *name); - address generate_sha512_implCompress(bool multi_block, const char *name); + address generate_sha256_implCompress(StubGenStubId stub_id); + address generate_sha512_implCompress(StubGenStubId stub_id); // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
address generate_pshuffle_byte_flip_mask_sha512(); @@ -499,7 +489,7 @@ class StubGenerator: public StubCodeGenerator { // SHA3 stubs void generate_sha3_stubs(); - address generate_sha3_implCompress(bool multiBlock, const char *name); + address generate_sha3_implCompress(StubGenStubId stub_id); // BASE64 stubs @@ -595,7 +585,7 @@ class StubGenerator: public StubCodeGenerator { void generate_string_indexof(address *fnptrs); #endif - address generate_cont_thaw(const char* label, Continuation::thaw_kind kind); + address generate_cont_thaw(StubGenStubId stub_id); address generate_cont_thaw(); // TODO: will probably need multiple return barriers depending on return type @@ -604,6 +594,8 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_preempt_stub(); + // TODO -- delete this as it is not implemented? + // // Continuation point for throwing of implicit exceptions that are // not handled in the current activation. Fabricates an exception // oop and initiates normal exception dispatching in this @@ -629,7 +621,7 @@ class StubGenerator: public StubCodeGenerator { address generate_upcall_stub_load_target(); // Specialized stub implementations for UseSecondarySupersTable. - address generate_lookup_secondary_supers_table_stub(u1 super_klass_index); + void generate_lookup_secondary_supers_table_stub(); // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub(); @@ -642,8 +634,8 @@ class StubGenerator: public StubCodeGenerator { void generate_compiler_stubs(); void generate_final_stubs(); - public: - StubGenerator(CodeBuffer* code, StubsKind kind); +public: + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id); }; #endif // CPU_X86_STUBGENERATOR_X86_64_HPP diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp index 287fd005c2c95..8a2aeaa5887a6 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2021, 2023, Intel Corporation. All rights reserved. +* Copyright (c) 2021, 2024, Intel Corporation. All rights reserved. * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -66,7 +66,8 @@ address StubGenerator::generate_updateBytesAdler32() { assert(UseAdler32Intrinsics, ""); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32"); + StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // Choose an appropriate LIMIT for inner loop based on the granularity diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp index b6dc046d172a4..26bf3f7d725e7 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp @@ -249,7 +249,8 @@ void StubGenerator::generate_aes_stubs() { // rax - number of processed bytes address StubGenerator::generate_galoisCounterMode_AESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register in = c_rarg0; @@ -335,7 +336,8 @@ address 
StubGenerator::generate_galoisCounterMode_AESCrypt() { // rax - number of processed bytes address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register in = c_rarg0; @@ -406,7 +408,8 @@ address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() { // Vector AES Counter implementation address StubGenerator::generate_counterMode_VectorAESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address @@ -494,7 +497,8 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() { address StubGenerator::generate_counterMode_AESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); + StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address @@ -781,7 +785,8 @@ address StubGenerator::generate_counterMode_AESCrypt_Parallel() { address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() { assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address @@ -1063,7 +1068,8 @@ address 
StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() { address StubGenerator::generate_aescrypt_encryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_doLast; address start = __ pc(); @@ -1157,7 +1163,8 @@ address StubGenerator::generate_aescrypt_encryptBlock() { address StubGenerator::generate_aescrypt_decryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); + StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubCodeMark mark(this, stub_id); Label L_doLast; address start = __ pc(); @@ -1258,7 +1265,8 @@ address StubGenerator::generate_aescrypt_decryptBlock() { address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; @@ -1409,7 +1417,8 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() { address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() { assert(UseAES, "need AES instructions and misaligned SSE support"); __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address @@ 
-1651,7 +1660,8 @@ __ opc(xmm_result3, src_reg); \ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::electronicCodeBook_encryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address @@ -1671,7 +1681,8 @@ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() { address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt"); + StubGenStubId stub_id = StubGenStubId::electronicCodeBook_decryptAESCrypt_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register from = c_rarg0; // source array address diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp index ad2638dd25620..ccc8e456d5717 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp @@ -84,74 +84,51 @@ void StubGenerator::generate_arraycopy_stubs() { address entry_jlong_arraycopy; address entry_checkcast_arraycopy; - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, - "jbyte_disjoint_arraycopy"); - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, - "jbyte_arraycopy"); - - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, - "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, - "jshort_arraycopy"); - - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, - "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = 
generate_conjoint_int_oop_copy(false, false, entry, - &entry_jint_arraycopy, "jint_arraycopy"); - - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, - "jlong_disjoint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, - &entry_jlong_arraycopy, "jlong_arraycopy"); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(&entry); + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(entry, &entry_jbyte_arraycopy); + + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(&entry); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(entry, &entry_jshort_arraycopy); + + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); + StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); + + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::jlong_disjoint_arraycopy_id, &entry); + StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::jlong_arraycopy_id, entry, &entry_jlong_arraycopy); if (UseCompressedOops) { - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, - "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, - &entry_oop_arraycopy, "oop_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, - "oop_disjoint_arraycopy_uninit", - /*dest_uninitialized*/true); - StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, - nullptr, "oop_arraycopy_uninit", - /*dest_uninitialized*/true); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry); + StubRoutines::_oop_arraycopy = 
generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr); } else { - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, - "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, - &entry_oop_arraycopy, "oop_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, - "oop_disjoint_arraycopy_uninit", - /*dest_uninitialized*/true); - StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, - nullptr, "oop_arraycopy_uninit", - /*dest_uninitialized*/true); - } - - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr, - /*dest_uninitialized*/true); - - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry); + StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr); + } + + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); + 
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); + + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_jlong_arraycopy); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", - entry_jbyte_arraycopy, + StubRoutines::_generic_arraycopy = generate_generic_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, entry_jint_arraycopy, entry_oop_arraycopy, entry_jlong_arraycopy, entry_checkcast_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); - StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); - StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); - StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); - StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); - StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); + StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); - StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory("unsafe_setmemory", StubRoutines::_jbyte_fill); + StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(StubRoutines::_jbyte_fill); // We don't generate specialized code for HeapWord-aligned source // arrays, so just use the code we've already generated @@ -507,11 +484,50 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest, // 
disjoint_copy_avx3_masked is set to the no-overlap entry point // used by generate_conjoint_[byte/int/short/long]_copy(). // -address StubGenerator::generate_disjoint_copy_avx3_masked(address* entry, const char *name, - int shift, bool aligned, bool is_oop, - bool dest_uninitialized) { +address StubGenerator::generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + int shift; + bool is_oop; + bool dest_uninitialized; + + switch (stub_id) { + case jbyte_disjoint_arraycopy_id: + shift = 0; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_disjoint_arraycopy_id: + shift = 1; + is_oop = false; + dest_uninitialized = false; + break; + case jint_disjoint_arraycopy_id: + shift = 2; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_disjoint_arraycopy_id: + shift = 3; + is_oop = false; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_id: + shift = (UseCompressedOops ? 2 : 3); + is_oop = true; + dest_uninitialized = false; + break; + case oop_disjoint_arraycopy_uninit_id: + shift = (UseCompressedOops ? 
2 : 3); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); int avx3threshold = VM_Version::avx3_threshold(); @@ -806,11 +822,50 @@ void StubGenerator::arraycopy_avx3_large(Register to, Register from, Register te // c_rarg2 - element count, treated as ssize_t, can be zero // // -address StubGenerator::generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift, - address nooverlap_target, bool aligned, - bool is_oop, bool dest_uninitialized) { +address StubGenerator::generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry, address nooverlap_target) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + int shift; + bool is_oop; + bool dest_uninitialized; + + switch (stub_id) { + case jbyte_arraycopy_id: + shift = 0; + is_oop = false; + dest_uninitialized = false; + break; + case jshort_arraycopy_id: + shift = 1; + is_oop = false; + dest_uninitialized = false; + break; + case jint_arraycopy_id: + shift = 2; + is_oop = false; + dest_uninitialized = false; + break; + case jlong_arraycopy_id: + shift = 3; + is_oop = false; + dest_uninitialized = false; + break; + case oop_arraycopy_id: + shift = (UseCompressedOops ? 2 : 3); + is_oop = true; + dest_uninitialized = false; + break; + case oop_arraycopy_uninit_id: + shift = (UseCompressedOops ? 
2 : 3); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); int avx3threshold = VM_Version::avx3_threshold(); @@ -1262,9 +1317,7 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored -// name - stub name string +// entry - location for return of (post-push) entry // // Inputs: // c_rarg0 - source array address @@ -1277,18 +1330,20 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe // and stored atomically. // // Side Effects: -// disjoint_byte_copy_entry is set to the no-overlap entry point +// entry is set to the no-overlap entry point // used by generate_conjoint_byte_copy(). // -address StubGenerator::generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { +address StubGenerator::generate_disjoint_byte_copy(address* entry) { + StubGenStubId stub_id = StubGenStubId::jbyte_disjoint_arraycopy_id; + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; #if COMPILER2_OR_JVMCI if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_disjoint_copy_avx3_masked(entry, "jbyte_disjoint_arraycopy_avx3", 0, - aligned, false, false); + return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; @@ -1383,9 +1438,8 @@ __ BIND(L_exit); // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored -// name - stub name string +// entry - location for return of 
(post-push) entry +// nooverlap_target - entry to branch to if no overlap detected // // Inputs: // c_rarg0 - source array address @@ -1397,16 +1451,17 @@ __ BIND(L_exit); // dwords or qwords that span cache line boundaries will still be loaded // and stored atomically. // -address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverlap_target, - address* entry, const char *name) { +address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, address* entry) { + StubGenStubId stub_id = StubGenStubId::jbyte_arraycopy_id; + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; #if COMPILER2_OR_JVMCI if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_conjoint_copy_avx3_masked(entry, "jbyte_conjoint_arraycopy_avx3", 0, - nooverlap_target, aligned, false, false); + return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY; @@ -1493,9 +1548,7 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored -// name - stub name string +// entry - location for return of (post-push) entry // // Inputs: // c_rarg0 - source array address @@ -1508,19 +1561,21 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl // and stored atomically. // // Side Effects: -// disjoint_short_copy_entry is set to the no-overlap entry point +// entry is set to the no-overlap entry point // used by generate_conjoint_short_copy(). 
// -address StubGenerator::generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { +address StubGenerator::generate_disjoint_short_copy(address *entry) { + StubGenStubId stub_id = StubGenStubId::jshort_disjoint_arraycopy_id; + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; #if COMPILER2_OR_JVMCI if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_disjoint_copy_avx3_masked(entry, "jshort_disjoint_arraycopy_avx3", 1, - aligned, false, false); + return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; @@ -1607,9 +1662,41 @@ __ BIND(L_exit); } -address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name) { +address StubGenerator::generate_fill(StubGenStubId stub_id) { + BasicType t; + bool aligned; + + switch (stub_id) { + case jbyte_fill_id: + t = T_BYTE; + aligned = false; + break; + case jshort_fill_id: + t = T_SHORT; + aligned = false; + break; + case jint_fill_id: + t = T_INT; + aligned = false; + break; + case arrayof_jbyte_fill_id: + t = T_BYTE; + aligned = true; + break; + case arrayof_jshort_fill_id: + t = T_SHORT; + aligned = true; + break; + case arrayof_jint_fill_id: + t = T_INT; + aligned = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); BLOCK_COMMENT("Entry:"); @@ -1636,9 +1723,8 @@ address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored -// name - stub name string +// entry - location for return of (post-push) 
entry +// nooverlap_target - entry to branch to if no overlap detected // // Inputs: // c_rarg0 - source array address @@ -1650,16 +1736,18 @@ address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name // or qwords that span cache line boundaries will still be loaded // and stored atomically. // -address StubGenerator::generate_conjoint_short_copy(bool aligned, address nooverlap_target, - address *entry, const char *name) { +address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, address *entry) { + StubGenStubId stub_id = StubGenStubId::jshort_arraycopy_id; + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; #if COMPILER2_OR_JVMCI if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_conjoint_copy_avx3_masked(entry, "jshort_conjoint_arraycopy_avx3", 1, - nooverlap_target, aligned, false, false); + return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); DecoratorSet decorators = IN_HEAP | IS_ARRAY; @@ -1738,10 +1826,9 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored -// is_oop - true => oop array, so generate store check code -// name - stub name string +// stub_id - unqiue id for stub to generate +// entry - location for return of (post-push) entry +// is_oop - true => oop array, so generate store check code // // Inputs: // c_rarg0 - source array address @@ -1756,18 +1843,39 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover // disjoint_int_copy_entry is set to the no-overlap entry point // used by generate_conjoint_int_oop_copy(). 
// -address StubGenerator::generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, - const char *name, bool dest_uninitialized) { +address StubGenerator::generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case StubGenStubId::jint_disjoint_arraycopy_id: + is_oop = false; + dest_uninitialized = false; + break; + case StubGenStubId::oop_disjoint_arraycopy_id: + assert(UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = false; + break; + case StubGenStubId::oop_disjoint_arraycopy_uninit_id: + assert(UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); #if COMPILER2_OR_JVMCI if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_disjoint_copy_avx3_masked(entry, "jint_disjoint_arraycopy_avx3", 2, - aligned, is_oop, dest_uninitialized); + return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; @@ -1853,10 +1961,9 @@ __ BIND(L_exit); // Arguments: -// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary -// ignored +// entry - location for return of (post-push) entry +// nooverlap_target - entry to branch to if no overlap detected // is_oop - true => oop array, so generate store check code -// name - stub name string // // Inputs: // c_rarg0 - source array address @@ -1867,18 +1974,39 @@ __ BIND(L_exit); // the hardware handle 
it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomically. // -address StubGenerator::generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, - address *entry, const char *name, - bool dest_uninitialized) { +address StubGenerator::generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case StubGenStubId::jint_arraycopy_id: + is_oop = false; + dest_uninitialized = false; + break; + case StubGenStubId::oop_arraycopy_id: + assert(UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = false; + break; + case StubGenStubId::oop_arraycopy_uninit_id: + assert(UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); #if COMPILER2_OR_JVMCI if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_conjoint_copy_avx3_masked(entry, "jint_conjoint_arraycopy_avx3", 2, - nooverlap_target, aligned, is_oop, dest_uninitialized); + return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; @@ -1968,10 +2096,7 @@ __ BIND(L_exit); // Arguments: -// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes -// ignored -// is_oop - true => oop array, so generate store check code -// name - stub name string +// entry - location for return of (post-push) entry // // Inputs: 
// c_rarg0 - source array address @@ -1982,17 +2107,39 @@ __ BIND(L_exit); // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the // no-overlap entry point used by generate_conjoint_long_oop_copy(). // -address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, - const char *name, bool dest_uninitialized) { +address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, address *entry) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case StubGenStubId::jlong_disjoint_arraycopy_id: + is_oop = false; + dest_uninitialized = false; + break; + case StubGenStubId::oop_disjoint_arraycopy_id: + assert(!UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = false; + break; + case StubGenStubId::oop_disjoint_arraycopy_uninit_id: + assert(!UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); #if COMPILER2_OR_JVMCI if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_disjoint_copy_avx3_masked(entry, "jlong_disjoint_arraycopy_avx3", 3, - aligned, is_oop, dest_uninitialized); + return generate_disjoint_copy_avx3_masked(stub_id, entry); } #endif + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; @@ -2084,28 +2231,48 @@ address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop // Arguments: -// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes -// ignored +// entry - location for return of (post-push) 
entry +// nooverlap_target - entry to branch to if no overlap detected // is_oop - true => oop array, so generate store check code -// name - stub name string // // Inputs: // c_rarg0 - source array address // c_rarg1 - destination array address // c_rarg2 - element count, treated as ssize_t, can be zero // -address StubGenerator::generate_conjoint_long_oop_copy(bool aligned, bool is_oop, address nooverlap_target, - address *entry, const char *name, - bool dest_uninitialized) { +address StubGenerator::generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { + // aligned is always false -- x86_64 always uses the unaligned code + const bool aligned = false; + bool is_oop; + bool dest_uninitialized; + switch (stub_id) { + case StubGenStubId::jlong_arraycopy_id: + is_oop = false; + dest_uninitialized = false; + break; + case StubGenStubId::oop_arraycopy_id: + assert(!UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = false; + break; + case StubGenStubId::oop_arraycopy_uninit_id: + assert(!UseCompressedOops, "inconsistent oop copy size!"); + is_oop = true; + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); #if COMPILER2_OR_JVMCI if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) { - return generate_conjoint_copy_avx3_masked(entry, "jlong_conjoint_arraycopy_avx3", 3, - nooverlap_target, aligned, is_oop, dest_uninitialized); + return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target); } #endif + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_copy_bytes, L_copy_8_bytes, L_exit; @@ -2224,7 +2391,19 @@ void StubGenerator::generate_type_check(Register sub_klass, // rax == 0 - 
success // rax == -1^K - failure, where K is partial transfer count // -address StubGenerator::generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized) { +address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *entry) { + + bool dest_uninitialized; + switch (stub_id) { + case StubGenStubId::checkcast_arraycopy_id: + dest_uninitialized = false; + break; + case StubGenStubId::checkcast_arraycopy_uninit_id: + dest_uninitialized = true; + break; + default: + ShouldNotReachHere(); + } Label L_load_element, L_store_element, L_do_card_marks, L_done; @@ -2254,7 +2433,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry, // checked. __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2430,8 +2609,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry, // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. 
// -address StubGenerator::generate_unsafe_copy(const char *name, - address byte_copy_entry, address short_copy_entry, +address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address long_copy_entry) { Label L_long_aligned, L_int_aligned, L_short_aligned; @@ -2445,7 +2623,8 @@ address StubGenerator::generate_unsafe_copy(const char *name, const Register bits = rax; // test copy of low bits __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2578,10 +2757,10 @@ static void do_setmemory_atomic_loop(USM_TYPE type, Register dest, // Examines the alignment of the operands and dispatches // to an int, short, or byte fill loop. // -address StubGenerator::generate_unsafe_setmemory(const char *name, - address unsafe_byte_fill) { +address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::unsafe_setmemory_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -2724,8 +2903,7 @@ void StubGenerator::arraycopy_range_checks(Register src, // source array oop // rax == 0 - success // rax == -1^K - failure, where K is partial transfer count // -address StubGenerator::generate_generic_copy(const char *name, - address byte_copy_entry, address short_copy_entry, +address StubGenerator::generate_generic_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { @@ -2751,7 +2929,8 @@ address StubGenerator::generate_generic_copy(const char *name, if (advance < 0) advance += modulus; if 
(advance > 0) __ nop(advance); } - StubCodeMark mark(this, "StubRoutines", name); + StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubCodeMark mark(this, stub_id); // Short-hop target to L_failed. Makes for denser prologue code. __ BIND(L_failed_0); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp index 9175dea0a5b4d..f7fb402407772 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp @@ -112,7 +112,8 @@ void StubGenerator::generate_chacha_stubs() { /* The 2-block AVX/AVX2-enabled ChaCha20 block function implementation */ address StubGenerator::generate_chacha20Block_avx() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "chacha20Block"); + StubGenStubId stub_id = StubGenStubId::chacha20Block_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_twoRounds; @@ -300,7 +301,8 @@ address StubGenerator::generate_chacha20Block_avx() { /* The 4-block AVX512-enabled ChaCha20 block function implementation */ address StubGenerator::generate_chacha20Block_avx512() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "chacha20Block"); + StubGenStubId stub_id = StubGenStubId::chacha20Block_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_twoRounds; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp index ec6a98ae7b10d..3f037a919d784 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. + * Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Intel Math Library (LIBM) Source Code * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -173,7 +173,8 @@ #define __ _masm-> address StubGenerator::generate_libmCos() { - StubCodeMark mark(this, "StubRoutines", "libmCos"); + StubGenStubId stub_id = StubGenStubId::dcos_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp index 335906806ff8f..b48ed80788b17 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. +* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. * Intel Math Library (LIBM) Source Code * @@ -165,7 +165,8 @@ ATTRIBUTE_ALIGNED(4) static const juint _INF[] = #define __ _masm-> address StubGenerator::generate_libmExp() { - StubCodeMark mark(this, "StubRoutines", "libmExp"); + StubGenStubId stub_id = StubGenStubId::dexp_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp index 3bb0b4f6b1f47..958f65c883ac4 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Intel Corporation. All rights reserved. + * Copyright (c) 2023, 2024, Intel Corporation. All rights reserved. * Intel Math Library (LIBM) Source Code * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -73,7 +73,8 @@ ATTRIBUTE_ALIGNED(32) static const uint64_t CONST_e307[] = { address StubGenerator::generate_libmFmod() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "libmFmod"); + StubGenStubId stub_id = StubGenStubId::fmod_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp index 7ed1b76394684..6d1f9fbd5a18f 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp @@ -82,7 +82,8 @@ void StubGenerator::generate_ghash_stubs() { address StubGenerator::generate_ghash_processBlocks() { __ align(CodeEntryAlignment); Label L_ghash_loop, L_exit; - StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register state = c_rarg0; @@ -218,7 +219,8 @@ address StubGenerator::generate_ghash_processBlocks() { address StubGenerator::generate_avx_ghash_processBlocks() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); + StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); // arguments diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp index 1b6c9fdcd37e5..6aacfaaea03de 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. +* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. 
* Intel Math Library (LIBM) Source Code * @@ -176,7 +176,8 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff[] = #define __ _masm-> address StubGenerator::generate_libmLog() { - StubCodeMark mark(this, "StubRoutines", "libmLog"); + StubGenStubId stub_id = StubGenStubId::dlog_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; @@ -514,7 +515,8 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff_log10[] = }; address StubGenerator::generate_libmLog10() { - StubCodeMark mark(this, "StubRoutines", "libmLog10"); + StubGenStubId stub_id = StubGenStubId::dlog10_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp index 6f952a603321a..dfd4ca21dbd28 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp @@ -910,7 +910,8 @@ void StubGenerator::poly1305_process_blocks_avx512( // and accumulator will point to the current accumulator value address StubGenerator::generate_poly1305_processBlocks() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "poly1305_processBlocks"); + StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp index 1732d251c98a4..b75162dbb47ad 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp @@ -237,7 +237,8 @@ void montgomeryMultiply(const Register aLimbs, const Register bLimbs, const Regi address StubGenerator::generate_intpoly_montgomeryMult_P256() { __ 
align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "intpoly_montgomeryMult_P256"); + StubGenStubId stub_id = StubGenStubId::intpoly_montgomeryMult_P256_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -295,7 +296,8 @@ address StubGenerator::generate_intpoly_assign() { // Special Cases 5, 10, 14, 16, 19 __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", "intpoly_assign"); + StubGenStubId stub_id = StubGenStubId::intpoly_assign_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp index 4afcea596e88b..4029e53d1b14e 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. +* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. 
* Intel Math Library (LIBM) Source Code * @@ -759,7 +759,8 @@ ATTRIBUTE_ALIGNED(8) static const juint _DOUBLE0DOT5[] = { #define __ _masm-> address StubGenerator::generate_libmPow() { - StubCodeMark mark(this, "StubRoutines", "libmPow"); + StubGenStubId stub_id = StubGenStubId::dpow_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp index 50c200dbff221..7d1051711f20c 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp @@ -81,8 +81,8 @@ static address permsAndRotsAddr() { void StubGenerator::generate_sha3_stubs() { if (UseSHA3Intrinsics) { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress(false,"sha3_implCompress"); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(true, "sha3_implCompressMB"); + StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id); } } @@ -95,9 +95,21 @@ void StubGenerator::generate_sha3_stubs() { // c_rarg3 - int offset // c_rarg4 - int limit // -address StubGenerator::generate_sha3_implCompress(bool multiBlock, const char *name) { +address StubGenerator::generate_sha3_implCompress(StubGenStubId stub_id) { + bool multiBlock; + switch(stub_id) { + case sha3_implCompress_id: + multiBlock = false; + break; + case sha3_implCompressMB_id: + multiBlock = true; + break; + default: + ShouldNotReachHere(); + } + __ align(CodeEntryAlignment); - StubCodeMark mark(this, "StubRoutines", name); + StubCodeMark mark(this, stub_id); address start = __ pc(); const Register buf = c_rarg0; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp index 
61caf5066fc31..362eeb95e41a3 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. + * Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Intel Math Library (LIBM) Source Code * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -180,7 +180,8 @@ ATTRIBUTE_ALIGNED(8) static const juint _ALL_ONES[] = #define __ _masm-> address StubGenerator::generate_libmSin() { - StubCodeMark mark(this, "StubRoutines", "libmSin"); + StubGenStubId stub_id = StubGenStubId::dsin_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp index 11889f627a95d..46cb0801a81ff 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved. +* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved. * Intel Math Library (LIBM) Source Code * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -455,7 +455,8 @@ ATTRIBUTE_ALIGNED(8) static const juint _QQ_2_tan[] = #define __ _masm-> address StubGenerator::generate_libmTan() { - StubCodeMark mark(this, "StubRoutines", "libmTan"); + StubGenStubId stub_id = StubGenStubId::dtan_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp index b105fd1088970..d13809bfcd911 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp @@ -302,7 +302,8 @@ ATTRIBUTE_ALIGNED(16) static const juint _T2_neg_f[] = #define __ _masm-> address StubGenerator::generate_libmTanh() { - StubCodeMark mark(this, "StubRoutines", "libmTanh"); + StubGenStubId stub_id = StubGenStubId::dtanh_id; + StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1; diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp index 9db3a89d29c51..861c1e1216e3b 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp @@ -32,61 +32,24 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, see the stubRoutines.hpp file. 
-address StubRoutines::x86::_verify_mxcsr_entry = nullptr; -address StubRoutines::x86::_upper_word_mask_addr = nullptr; -address StubRoutines::x86::_shuffle_byte_flip_mask_addr = nullptr; +// define fields for arch-specific entries + +#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr; + +#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); + +STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT) + +#undef DEFINE_ARCH_ENTRY_INIT +#undef DEFINE_ARCH_ENTRY + address StubRoutines::x86::_k256_adr = nullptr; -address StubRoutines::x86::_vector_short_to_byte_mask = nullptr; -address StubRoutines::x86::_vector_int_to_byte_mask = nullptr; -address StubRoutines::x86::_vector_int_to_short_mask = nullptr; -address StubRoutines::x86::_vector_all_bits_set = nullptr; -address StubRoutines::x86::_vector_byte_shuffle_mask = nullptr; -address StubRoutines::x86::_vector_int_mask_cmp_bits = nullptr; -address StubRoutines::x86::_vector_short_shuffle_mask = nullptr; -address StubRoutines::x86::_vector_int_shuffle_mask = nullptr; -address StubRoutines::x86::_vector_long_shuffle_mask = nullptr; -address StubRoutines::x86::_vector_float_sign_mask = nullptr; -address StubRoutines::x86::_vector_float_sign_flip = nullptr; -address StubRoutines::x86::_vector_double_sign_mask = nullptr; -address StubRoutines::x86::_vector_double_sign_flip = nullptr; -address StubRoutines::x86::_vector_byte_perm_mask = nullptr; -address StubRoutines::x86::_vector_long_sign_mask = nullptr; -address StubRoutines::x86::_vector_iota_indices = nullptr; -address StubRoutines::x86::_vector_reverse_bit_lut = nullptr; -address StubRoutines::x86::_vector_reverse_byte_perm_mask_long = nullptr; -address StubRoutines::x86::_vector_reverse_byte_perm_mask_int = 
nullptr; -address StubRoutines::x86::_vector_reverse_byte_perm_mask_short = nullptr; -address StubRoutines::x86::_vector_popcount_lut = nullptr; -address StubRoutines::x86::_vector_count_leading_zeros_lut = nullptr; -address StubRoutines::x86::_vector_32_bit_mask = nullptr; -address StubRoutines::x86::_vector_64_bit_mask = nullptr; #ifdef _LP64 address StubRoutines::x86::_k256_W_adr = nullptr; address StubRoutines::x86::_k512_W_addr = nullptr; -address StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = nullptr; -// Base64 masks -address StubRoutines::x86::_encoding_table_base64 = nullptr; -address StubRoutines::x86::_shuffle_base64 = nullptr; -address StubRoutines::x86::_avx2_shuffle_base64 = nullptr; -address StubRoutines::x86::_avx2_input_mask_base64 = nullptr; -address StubRoutines::x86::_avx2_lut_base64 = nullptr; -address StubRoutines::x86::_avx2_decode_tables_base64 = nullptr; -address StubRoutines::x86::_avx2_decode_lut_tables_base64 = nullptr; -address StubRoutines::x86::_lookup_lo_base64 = nullptr; -address StubRoutines::x86::_lookup_hi_base64 = nullptr; -address StubRoutines::x86::_lookup_lo_base64url = nullptr; -address StubRoutines::x86::_lookup_hi_base64url = nullptr; -address StubRoutines::x86::_pack_vec_base64 = nullptr; -address StubRoutines::x86::_join_0_1_base64 = nullptr; -address StubRoutines::x86::_join_1_2_base64 = nullptr; -address StubRoutines::x86::_join_2_3_base64 = nullptr; -address StubRoutines::x86::_decoding_table_base64 = nullptr; -address StubRoutines::x86::_compress_perm_table32 = nullptr; -address StubRoutines::x86::_compress_perm_table64 = nullptr; -address StubRoutines::x86::_expand_perm_table32 = nullptr; -address StubRoutines::x86::_expand_perm_table64 = nullptr; #endif -address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = nullptr; const uint64_t StubRoutines::x86::_crc_by128_masks[] = { diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.hpp b/src/hotspot/cpu/x86/stubRoutines_x86.hpp index 
0a6d091de8c7f..aaf84eb843777 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86.hpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86.hpp @@ -31,82 +31,52 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; } +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + enum platform_dependent_constants { - // simply increase sizes if too small (assembler will crash if too small) - _initial_stubs_code_size = 20000 WINDOWS_ONLY(+1000), - _continuation_stubs_code_size = 1000 LP64_ONLY(+2000), - // AVX512 intrinsics add more code in 64-bit VM, - // Windows have more code to save/restore registers - _compiler_stubs_code_size = 20000 LP64_ONLY(+47000) WINDOWS_ONLY(+2000), - _final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+22000) ZGC_ONLY(+20000) + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) }; +#undef DEFINE_BLOB_SIZE + class x86 { friend class StubGenerator; friend class VMStructs; -#ifdef _LP64 - private: - static address _get_previous_sp_entry; - - static address _f2i_fixup; - static address _f2l_fixup; - static address _d2i_fixup; - static address _d2l_fixup; + // declare fields for arch-specific entries - static address _float_sign_mask; - static address _float_sign_flip; - static address _double_sign_mask; - static address _double_sign_flip; - static address _compress_perm_table32; - static address _compress_perm_table64; - static address _expand_perm_table32; - static address _expand_perm_table64; +#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name) ; - public: +#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) - static address get_previous_sp_entry() { - return _get_previous_sp_entry; - } +private: + 
    STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT) - static address f2i_fixup() { - return _f2i_fixup; - } +#undef DECLARE_ARCH_ENTRY_INIT +#undef DECLARE_ARCH_ENTRY - static address f2l_fixup() { - return _f2l_fixup; - } - static address d2i_fixup() { - return _d2i_fixup; - } + // define getters for arch-specific entries - static address d2l_fixup() { - return _d2l_fixup; - } +#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name); } - static address float_sign_mask() { - return _float_sign_mask; - } +#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \ + DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) - static address float_sign_flip() { - return _float_sign_flip; - } +public: + STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT) - static address double_sign_mask() { - return _double_sign_mask; - } +#undef DEFINE_ARCH_ENTRY_GETTER_INIT +#undef DEFINE_ARCH_ENTRY_GETTER - static address double_sign_flip() { - return _double_sign_flip; - } -#else // !LP64 - - private: - static address _verify_fpu_cntrl_wrd_entry; - static address _d2i_wrapper; - static address _d2l_wrapper; +#ifndef _LP64 static jint _fpu_cntrl_wrd_std; static jint _fpu_cntrl_wrd_24; @@ -115,10 +85,6 @@ class x86 { static jint _fpu_subnormal_bias1[3]; static jint _fpu_subnormal_bias2[3]; - public: - static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; } - static address d2i_wrapper() { return _d2i_wrapper; } - static address d2l_wrapper() { return _d2l_wrapper; } static address addr_fpu_cntrl_wrd_std() { return (address)&_fpu_cntrl_wrd_std; } static address addr_fpu_cntrl_wrd_24() { return (address)&_fpu_cntrl_wrd_24; } static address addr_fpu_cntrl_wrd_trunc() { return (address)&_fpu_cntrl_wrd_trunc; } @@ -133,9 +99,6 @@ class x86 { #ifdef
_LP64 static jint _mxcsr_rz; #endif // _LP64 - - static address _verify_mxcsr_entry; - // masks and table for CRC32 static const uint64_t _crc_by128_masks[]; static const juint _crc_table[]; @@ -149,73 +112,21 @@ class x86 { static juint* _crc32c_table; // table for arrays_hashcode static const jint _arrays_hashcode_powers_of_31[]; - - // upper word mask for sha1 - static address _upper_word_mask_addr; - // byte flip mask for sha1 - static address _shuffle_byte_flip_mask_addr; - //k256 table for sha256 static const juint _k256[]; static address _k256_adr; - static address _vector_short_to_byte_mask; - static address _vector_float_sign_mask; - static address _vector_float_sign_flip; - static address _vector_double_sign_mask; - static address _vector_double_sign_flip; - static address _vector_long_sign_mask; - static address _vector_all_bits_set; - static address _vector_int_mask_cmp_bits; - static address _vector_byte_perm_mask; - static address _vector_int_to_byte_mask; - static address _vector_int_to_short_mask; - static address _vector_32_bit_mask; - static address _vector_64_bit_mask; - static address _vector_int_shuffle_mask; - static address _vector_byte_shuffle_mask; - static address _vector_short_shuffle_mask; - static address _vector_long_shuffle_mask; - static address _vector_iota_indices; - static address _vector_popcount_lut; - static address _vector_count_leading_zeros_lut; - static address _vector_reverse_bit_lut; - static address _vector_reverse_byte_perm_mask_long; - static address _vector_reverse_byte_perm_mask_int; - static address _vector_reverse_byte_perm_mask_short; #ifdef _LP64 static juint _k256_W[]; static address _k256_W_adr; static const julong _k512_W[]; static address _k512_W_addr; - // byte flip mask for sha512 - static address _pshuffle_byte_flip_mask_addr_sha512; - // Masks for base64 - static address _encoding_table_base64; - static address _shuffle_base64; - static address _avx2_shuffle_base64; - static address 
_avx2_input_mask_base64; - static address _avx2_lut_base64; - static address _avx2_decode_tables_base64; - static address _avx2_decode_lut_tables_base64; - static address _lookup_lo_base64; - static address _lookup_hi_base64; - static address _lookup_lo_base64url; - static address _lookup_hi_base64url; - static address _pack_vec_base64; - static address _join_0_1_base64; - static address _join_1_2_base64; - static address _join_2_3_base64; - static address _decoding_table_base64; #endif - // byte flip mask for sha256 - static address _pshuffle_byte_flip_mask_addr; public: static address addr_mxcsr_std() { return (address)&_mxcsr_std; } #ifdef _LP64 static address addr_mxcsr_rz() { return (address)&_mxcsr_rz; } #endif // _LP64 - static address verify_mxcsr_entry() { return _verify_mxcsr_entry; } static address crc_by128_masks_addr() { return (address)_crc_by128_masks; } #ifdef _LP64 static address crc_by128_masks_avx512_addr() { return (address)_crc_by128_masks_avx512; } @@ -223,131 +134,12 @@ class x86 { static address crc_table_avx512_addr() { return (address)_crc_table_avx512; } static address crc32c_table_avx512_addr() { return (address)_crc32c_table_avx512; } #endif // _LP64 - static address upper_word_mask_addr() { return _upper_word_mask_addr; } - static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; } static address k256_addr() { return _k256_adr; } - static address method_entry_barrier() { return _method_entry_barrier; } - - static address vector_short_to_byte_mask() { - return _vector_short_to_byte_mask; - } - static address vector_float_sign_mask() { - return _vector_float_sign_mask; - } - - static address vector_float_sign_flip() { - return _vector_float_sign_flip; - } - - static address vector_double_sign_mask() { - return _vector_double_sign_mask; - } - - static address vector_double_sign_flip() { - return _vector_double_sign_flip; - } - - static address vector_all_bits_set() { - return _vector_all_bits_set; - } - - static 
address vector_int_mask_cmp_bits() { - return _vector_int_mask_cmp_bits; - } - - static address vector_byte_perm_mask() { - return _vector_byte_perm_mask; - } - - static address vector_int_to_byte_mask() { - return _vector_int_to_byte_mask; - } - - static address vector_int_to_short_mask() { - return _vector_int_to_short_mask; - } - - static address vector_32_bit_mask() { - return _vector_32_bit_mask; - } - - static address vector_64_bit_mask() { - return _vector_64_bit_mask; - } - - static address vector_int_shuffle_mask() { - return _vector_int_shuffle_mask; - } - - static address vector_byte_shuffle_mask() { - return _vector_byte_shuffle_mask; - } - - static address vector_short_shuffle_mask() { - return _vector_short_shuffle_mask; - } - - static address vector_long_shuffle_mask() { - return _vector_long_shuffle_mask; - } - - static address vector_long_sign_mask() { - return _vector_long_sign_mask; - } - - static address vector_iota_indices() { - return _vector_iota_indices; - } - - static address vector_count_leading_zeros_lut() { - return _vector_count_leading_zeros_lut; - } - - static address vector_reverse_bit_lut() { - return _vector_reverse_bit_lut; - } - - static address vector_reverse_byte_perm_mask_long() { - return _vector_reverse_byte_perm_mask_long; - } - - static address vector_reverse_byte_perm_mask_int() { - return _vector_reverse_byte_perm_mask_int; - } - - static address vector_reverse_byte_perm_mask_short() { - return _vector_reverse_byte_perm_mask_short; - } - - static address vector_popcount_lut() { - return _vector_popcount_lut; - } #ifdef _LP64 static address k256_W_addr() { return _k256_W_adr; } static address k512_W_addr() { return _k512_W_addr; } - static address pshuffle_byte_flip_mask_addr_sha512() { return _pshuffle_byte_flip_mask_addr_sha512; } - static address base64_encoding_table_addr() { return _encoding_table_base64; } - static address base64_shuffle_addr() { return _shuffle_base64; } - static address base64_avx2_shuffle_addr() 
{ return _avx2_shuffle_base64; } - static address base64_avx2_input_mask_addr() { return _avx2_input_mask_base64; } - static address base64_avx2_lut_addr() { return _avx2_lut_base64; } - static address base64_vbmi_lookup_lo_addr() { return _lookup_lo_base64; } - static address base64_vbmi_lookup_hi_addr() { return _lookup_hi_base64; } - static address base64_vbmi_lookup_lo_url_addr() { return _lookup_lo_base64url; } - static address base64_vbmi_lookup_hi_url_addr() { return _lookup_hi_base64url; } - static address base64_vbmi_pack_vec_addr() { return _pack_vec_base64; } - static address base64_vbmi_join_0_1_addr() { return _join_0_1_base64; } - static address base64_vbmi_join_1_2_addr() { return _join_1_2_base64; } - static address base64_vbmi_join_2_3_addr() { return _join_2_3_base64; } - static address base64_decoding_table_addr() { return _decoding_table_base64; } - static address base64_AVX2_decode_tables_addr() { return _avx2_decode_tables_base64; } - static address base64_AVX2_decode_LUT_tables_addr() { return _avx2_decode_lut_tables_base64; } - static address compress_perm_table32() { return _compress_perm_table32; } - static address compress_perm_table64() { return _compress_perm_table64; } - static address expand_perm_table32() { return _expand_perm_table32; } - static address expand_perm_table64() { return _expand_perm_table64; } #endif - static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; } + static address arrays_hashcode_powers_of_31() { return (address)_arrays_hashcode_powers_of_31; } static void generate_CRC32C_table(bool is_pclmulqdq_supported); }; diff --git a/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp b/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp index 64e367b4c40fa..810f421f1418c 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86_32.cpp @@ -30,11 +30,6 @@ // Implementation of the platform-specific part of StubRoutines - for // a description of how to extend it, 
see the stubRoutines.hpp file. -address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = nullptr; - -address StubRoutines::x86::_d2i_wrapper = nullptr; -address StubRoutines::x86::_d2l_wrapper = nullptr; - jint StubRoutines::x86::_fpu_cntrl_wrd_std = 0; jint StubRoutines::x86::_fpu_cntrl_wrd_24 = 0; jint StubRoutines::x86::_fpu_cntrl_wrd_trunc = 0; diff --git a/src/hotspot/cpu/x86/stubRoutines_x86_64.cpp b/src/hotspot/cpu/x86/stubRoutines_x86_64.cpp index ef3097ebd24cf..0825ba63b4fe8 100644 --- a/src/hotspot/cpu/x86/stubRoutines_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubRoutines_x86_64.cpp @@ -32,14 +32,3 @@ jint StubRoutines::x86::_mxcsr_std = 0; jint StubRoutines::x86::_mxcsr_rz = 0; - -address StubRoutines::x86::_get_previous_sp_entry = nullptr; - -address StubRoutines::x86::_f2i_fixup = nullptr; -address StubRoutines::x86::_f2l_fixup = nullptr; -address StubRoutines::x86::_d2i_fixup = nullptr; -address StubRoutines::x86::_d2l_fixup = nullptr; -address StubRoutines::x86::_float_sign_mask = nullptr; -address StubRoutines::x86::_float_sign_flip = nullptr; -address StubRoutines::x86::_double_sign_mask = nullptr; -address StubRoutines::x86::_double_sign_flip = nullptr; diff --git a/src/hotspot/cpu/zero/stubDeclarations_zero.hpp b/src/hotspot/cpu/zero/stubDeclarations_zero.hpp new file mode 100644 index 0000000000000..5808ae3bd8fa1 --- /dev/null +++ b/src/hotspot/cpu/zero/stubDeclarations_zero.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ZERO_STUBDECLARATIONS_HPP +#define CPU_ZERO_STUBDECLARATIONS_HPP + +#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(initial, 0) \ + + +#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(continuation, 0) \ + + +#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(compiler, 0) \ + + +#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \ + do_arch_blob, \ + do_arch_entry, \ + do_arch_entry_init) \ + do_arch_blob(final, 0) \ + + +#endif // CPU_ZERO_STUBDECLARATIONS_HPP diff --git a/src/hotspot/cpu/zero/stubGenerator_zero.cpp b/src/hotspot/cpu/zero/stubGenerator_zero.cpp index 0ab43ed78f7a3..07b4e2a92afbe 100644 --- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp +++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp @@ -179,8 +179,6 @@ class StubGenerator: public StubCodeGenerator { } void generate_initial_stubs() { - // Generates all stubs and initializes the entry points - // entry points that exist in all platforms Note: This is code // that could be shared among different platforms - however the // benefit seems to be 
smaller than the disadvantage of having a @@ -199,26 +197,44 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_fence_entry = ShouldNotCallThisStub(); } - void generate_final_stubs() { - // Generates all stubs and initializes the entry points + void generate_continuation_stubs() { + // do nothing + } + void generate_compiler_stubs() { + // do nothing + } + + void generate_final_stubs() { // arraycopy stubs used by compilers generate_arraycopy_stubs(); } public: - StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) { - if (kind == Initial_stubs) { + StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + switch(blob_id) { + case initial_id: generate_initial_stubs(); - } else if (kind == Final_stubs) { + break; + case continuation_id: + generate_continuation_stubs(); + break; + case compiler_id: + // do nothing + break; + case final_id: generate_final_stubs(); - } + break; + default: + fatal("unexpected blob id: %d", blob_id); + break; + }; } }; -void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) { - StubGenerator g(code, kind); +void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { + StubGenerator g(code, blob_id); } EntryFrame *EntryFrame::build(const intptr_t* parameters, diff --git a/src/hotspot/cpu/zero/stubRoutines_zero.cpp b/src/hotspot/cpu/zero/stubRoutines_zero.cpp index 4337b2db73042..47d2c27eefdf1 100644 --- a/src/hotspot/cpu/zero/stubRoutines_zero.cpp +++ b/src/hotspot/cpu/zero/stubRoutines_zero.cpp @@ -27,3 +27,5 @@ #include "runtime/frame.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/stubRoutines.hpp" + +// zero has no arch-specific stubs nor any associated entries diff --git a/src/hotspot/cpu/zero/stubRoutines_zero.hpp b/src/hotspot/cpu/zero/stubRoutines_zero.hpp index bd04fdddc3d56..2002efc9f51d5 100644 --- a/src/hotspot/cpu/zero/stubRoutines_zero.hpp +++ b/src/hotspot/cpu/zero/stubRoutines_zero.hpp @@ -1,5 +1,5 
@@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -39,14 +39,18 @@ return return_pc == call_stub_return_pc(); } - enum platform_dependent_constants { - // The assembler will fail with a guarantee if these are too small. - // Simply increase them if that happens. - _initial_stubs_code_size = 0, - _continuation_stubs_code_size = 0, - _compiler_stubs_code_size = 0, - _final_stubs_code_size = 0 - }; +// emit enum used to size per-blob code buffers + +#define DEFINE_BLOB_SIZE(blob_name, size) \ + _ ## blob_name ## _code_size = size, + +enum platform_dependent_constants { + STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE) +}; + +#undef DEFINE_BLOB_SIZE + +// zero has no arch-specific stubs nor any associated entries enum method_handles_platform_dependent_constants { method_handles_adapters_code_size = 0 diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp index 4debfaa077f06..5723e86f8594c 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp @@ -537,7 +537,7 @@ int64_t ARMAtomicFuncs::cmpxchg_long_bootstrap(int64_t compare_value, int64_t ex int64_t ARMAtomicFuncs::load_long_bootstrap(const volatile int64_t* src) { // try to use the stub: - load_long_func_t func = CAST_TO_FN_PTR(load_long_func_t, StubRoutines::atomic_load_long_entry()); + load_long_func_t func = CAST_TO_FN_PTR(load_long_func_t, StubRoutines::Arm::atomic_load_long_entry()); if (func != nullptr) { _load_long_func = func; @@ -551,7 +551,7 @@ int64_t ARMAtomicFuncs::load_long_bootstrap(const volatile int64_t* src) { void ARMAtomicFuncs::store_long_bootstrap(int64_t val, volatile int64_t* dest) { // try to use the stub: - store_long_func_t func = 
CAST_TO_FN_PTR(store_long_func_t, StubRoutines::atomic_store_long_entry()); + store_long_func_t func = CAST_TO_FN_PTR(store_long_func_t, StubRoutines::Arm::atomic_store_long_entry()); if (func != nullptr) { _store_long_func = func; diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index a86995adb2ded..5fa6a94f70b4e 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -419,6 +419,11 @@ void CDSConfig::check_flag_aliases() { bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_flag_cmd_line) { check_flag_aliases(); + if (!FLAG_IS_DEFAULT(AOTMode)) { + // Using any form of the new AOTMode switch enables enhanced optimizations. + FLAG_SET_ERGO_IF_DEFAULT(AOTClassLinking, true); + } + if (AOTClassLinking) { // If AOTClassLinking is specified, enable all AOT optimizations by default. FLAG_SET_ERGO_IF_DEFAULT(AOTInvokeDynamicLinking, true); diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp index a51a903df090a..d1b33da19d89d 100644 --- a/src/hotspot/share/cds/metaspaceShared.cpp +++ b/src/hotspot/share/cds/metaspaceShared.cpp @@ -316,7 +316,10 @@ void MetaspaceShared::initialize_for_static_dump() { SharedBaseAddress = (size_t)_requested_base_address; size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M); - _symbol_rs = MemoryReserver::reserve(symbol_rs_size, mtClassShared); + _symbol_rs = MemoryReserver::reserve(symbol_rs_size, + os::vm_allocation_granularity(), + os::vm_page_size(), + mtClassShared); if (!_symbol_rs.is_reserved()) { log_error(cds)("Unable to reserve memory for symbols: %zu bytes.", symbol_rs_size); MetaspaceShared::unrecoverable_writing_error(); diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index 4c698170079ba..a9e683900199f 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -4667,28 
+4667,31 @@ int java_lang_invoke_MethodType::rtype_slot_count(oop mt) { // Support for java_lang_invoke_CallSite int java_lang_invoke_CallSite::_target_offset; -int java_lang_invoke_CallSite::_context_offset; +int java_lang_invoke_CallSite::_vmdependencies_offset; +int java_lang_invoke_CallSite::_last_cleanup_offset; #define CALLSITE_FIELDS_DO(macro) \ macro(_target_offset, k, "target", java_lang_invoke_MethodHandle_signature, false); \ - macro(_context_offset, k, "context", java_lang_invoke_MethodHandleNatives_CallSiteContext_signature, false) void java_lang_invoke_CallSite::compute_offsets() { InstanceKlass* k = vmClasses::CallSite_klass(); CALLSITE_FIELDS_DO(FIELD_COMPUTE_OFFSET); + CALLSITE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); } #if INCLUDE_CDS void java_lang_invoke_CallSite::serialize_offsets(SerializeClosure* f) { CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET); + CALLSITE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); } #endif -oop java_lang_invoke_CallSite::context_no_keepalive(oop call_site) { +DependencyContext java_lang_invoke_CallSite::vmdependencies(oop call_site) { assert(java_lang_invoke_CallSite::is_instance(call_site), ""); - - oop dep_oop = call_site->obj_field_access(_context_offset); - return dep_oop; + nmethodBucket* volatile* vmdeps_addr = call_site->field_addr(_vmdependencies_offset); + volatile uint64_t* last_cleanup_addr = call_site->field_addr(_last_cleanup_offset); + DependencyContext dep_ctx(vmdeps_addr, last_cleanup_addr); + return dep_ctx; } // Support for java_lang_invoke_ConstantCallSite @@ -4709,30 +4712,6 @@ void java_lang_invoke_ConstantCallSite::serialize_offsets(SerializeClosure* f) { } #endif -// Support for java_lang_invoke_MethodHandleNatives_CallSiteContext - -int java_lang_invoke_MethodHandleNatives_CallSiteContext::_vmdependencies_offset; -int java_lang_invoke_MethodHandleNatives_CallSiteContext::_last_cleanup_offset; - -void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() { - 
InstanceKlass* k = vmClasses::Context_klass(); - CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); -} - -#if INCLUDE_CDS -void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize_offsets(SerializeClosure* f) { - CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET); -} -#endif - -DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) { - assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), ""); - nmethodBucket* volatile* vmdeps_addr = call_site->field_addr(_vmdependencies_offset); - volatile uint64_t* last_cleanup_addr = call_site->field_addr(_last_cleanup_offset); - DependencyContext dep_ctx(vmdeps_addr, last_cleanup_addr); - return dep_ctx; -} - // Support for java_lang_ClassLoader int java_lang_ClassLoader::_loader_data_offset; @@ -5382,7 +5361,6 @@ void java_lang_InternalError::serialize_offsets(SerializeClosure* f) { f(java_lang_invoke_MethodType) \ f(java_lang_invoke_CallSite) \ f(java_lang_invoke_ConstantCallSite) \ - f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \ f(java_lang_reflect_AccessibleObject) \ f(java_lang_reflect_Method) \ f(java_lang_reflect_Constructor) \ @@ -5448,8 +5426,7 @@ bool JavaClasses::is_supported_for_archiving(oop obj) { if (!CDSConfig::is_dumping_invokedynamic()) { // These are supported by CDS only when CDSConfig::is_dumping_invokedynamic() is enabled. 
if (klass == vmClasses::ResolvedMethodName_klass() || - klass == vmClasses::MemberName_klass() || - klass == vmClasses::Context_klass()) { + klass == vmClasses::MemberName_klass()) { return false; } } diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp index c75261981cffa..bac6b86fd803a 100644 --- a/src/hotspot/share/classfile/javaClasses.hpp +++ b/src/hotspot/share/classfile/javaClasses.hpp @@ -1409,13 +1409,17 @@ class java_lang_invoke_MethodType: AllStatic { // Interface to java.lang.invoke.CallSite objects +#define CALLSITE_INJECTED_FIELDS(macro) \ + macro(java_lang_invoke_CallSite, vmdependencies, intptr_signature, false) \ + macro(java_lang_invoke_CallSite, last_cleanup, long_signature, false) class java_lang_invoke_CallSite: AllStatic { friend class JavaClasses; private: static int _target_offset; - static int _context_offset; + static int _vmdependencies_offset; + static int _last_cleanup_offset; static void compute_offsets(); @@ -1426,7 +1430,7 @@ class java_lang_invoke_CallSite: AllStatic { static void set_target( oop site, oop target); static void set_target_volatile( oop site, oop target); - static oop context_no_keepalive(oop site); + static DependencyContext vmdependencies(oop call_site); // Testers static bool is_subclass(Klass* klass) { @@ -1436,7 +1440,6 @@ class java_lang_invoke_CallSite: AllStatic { // Accessors for code generation: static int target_offset() { CHECK_INIT(_target_offset); } - static int context_offset() { CHECK_INIT(_context_offset); } }; // Interface to java.lang.invoke.ConstantCallSite objects @@ -1461,35 +1464,6 @@ class java_lang_invoke_ConstantCallSite: AllStatic { static bool is_instance(oop obj); }; -// Interface to java.lang.invoke.MethodHandleNatives$CallSiteContext objects - -#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \ - macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, vmdependencies, intptr_signature, false) \ - 
macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, last_cleanup, long_signature, false) - -class DependencyContext; - -class java_lang_invoke_MethodHandleNatives_CallSiteContext : AllStatic { - friend class JavaClasses; - -private: - static int _vmdependencies_offset; - static int _last_cleanup_offset; - - static void compute_offsets(); - -public: - static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN; - // Accessors - static DependencyContext vmdependencies(oop context); - - // Testers - static bool is_subclass(Klass* klass) { - return klass->is_subclass_of(vmClasses::Context_klass()); - } - static bool is_instance(oop obj); -}; - // Interface to java.lang.ClassLoader objects #define CLASSLOADER_INJECTED_FIELDS(macro) \ diff --git a/src/hotspot/share/classfile/javaClasses.inline.hpp b/src/hotspot/share/classfile/javaClasses.inline.hpp index 682806b8f3dfa..6a698e02298bf 100644 --- a/src/hotspot/share/classfile/javaClasses.inline.hpp +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp @@ -263,10 +263,6 @@ inline bool java_lang_invoke_ConstantCallSite::is_instance(oop obj) { return obj != nullptr && is_subclass(obj->klass()); } -inline bool java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(oop obj) { - return obj != nullptr && is_subclass(obj->klass()); -} - inline bool java_lang_invoke_MemberName::is_instance(oop obj) { return obj != nullptr && obj->klass() == vmClasses::MemberName_klass(); } diff --git a/src/hotspot/share/classfile/javaClassesImpl.hpp b/src/hotspot/share/classfile/javaClassesImpl.hpp index 5f4df9391e388..b450a4e3cc417 100644 --- a/src/hotspot/share/classfile/javaClassesImpl.hpp +++ b/src/hotspot/share/classfile/javaClassesImpl.hpp @@ -36,7 +36,7 @@ CLASSLOADER_INJECTED_FIELDS(macro) \ RESOLVEDMETHOD_INJECTED_FIELDS(macro) \ MEMBERNAME_INJECTED_FIELDS(macro) \ - CALLSITECONTEXT_INJECTED_FIELDS(macro) \ + CALLSITE_INJECTED_FIELDS(macro) \ STACKFRAMEINFO_INJECTED_FIELDS(macro) \ MODULE_INJECTED_FIELDS(macro) 
\ THREAD_INJECTED_FIELDS(macro) \ diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 0b40fb6d68137..b8e24bb2caace 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -322,6 +322,13 @@ bool SystemDictionaryShared::check_for_exclusion_impl(InstanceKlass* k) { if (!k->is_linked()) { if (has_class_failed_verification(k)) { return warn_excluded(k, "Failed verification"); + } else if (CDSConfig::is_dumping_aot_linked_classes()) { + // Most loaded classes should have been speculatively linked by MetaspaceShared::link_class_for_cds(). + // However, we do not speculatively link old classes, as they are not recorded by + // SystemDictionaryShared::record_linking_constraint(). As a result, such an unlinked + // class may fail to verify in AOTLinkedClassBulkLoader::init_required_classes_for_loader(), + // causing the JVM to fail at bootstrap. 
+ return warn_excluded(k, "Unlinked class not supported by AOTClassLinking"); } } else { if (!k->can_be_verified_at_dumptime()) { diff --git a/src/hotspot/share/classfile/vmClassMacros.hpp b/src/hotspot/share/classfile/vmClassMacros.hpp index 7dc2d7f60c963..351b9b0b53f24 100644 --- a/src/hotspot/share/classfile/vmClassMacros.hpp +++ b/src/hotspot/share/classfile/vmClassMacros.hpp @@ -123,7 +123,6 @@ do_klass(ABIDescriptor_klass, jdk_internal_foreign_abi_ABIDescriptor ) \ do_klass(VMStorage_klass, jdk_internal_foreign_abi_VMStorage ) \ do_klass(CallConv_klass, jdk_internal_foreign_abi_CallConv ) \ - do_klass(Context_klass, java_lang_invoke_MethodHandleNatives_CallSiteContext ) \ do_klass(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite ) \ do_klass(MutableCallSite_klass, java_lang_invoke_MutableCallSite ) \ do_klass(VolatileCallSite_klass, java_lang_invoke_VolatileCallSite ) \ diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp index 8ee5ff02a6a09..867a992839e35 100644 --- a/src/hotspot/share/classfile/vmSymbols.hpp +++ b/src/hotspot/share/classfile/vmSymbols.hpp @@ -325,11 +325,9 @@ class SerializeClosure; template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \ template(java_lang_invoke_ResolvedMethodName, "java/lang/invoke/ResolvedMethodName") \ template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \ - template(java_lang_invoke_MethodHandleNatives_CallSiteContext, "java/lang/invoke/MethodHandleNatives$CallSiteContext") \ template(java_lang_invoke_LambdaForm, "java/lang/invoke/LambdaForm") \ template(java_lang_invoke_InjectedProfile_signature, "Ljava/lang/invoke/InjectedProfile;") \ template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \ - template(java_lang_invoke_MethodHandleNatives_CallSiteContext_signature, "Ljava/lang/invoke/MethodHandleNatives$CallSiteContext;") \ /* internal up-calls made only by the JVM, via 
class sun.invoke.MethodHandleNatives: */ \ template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \ diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index 6a7742cea8d3d..f64b6e15c1a69 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -167,12 +167,6 @@ void DependencyContext::clean_unloading_dependents() { } } -nmethodBucket* DependencyContext::release_and_get_next_not_unloading(nmethodBucket* b) { - nmethodBucket* next = b->next_not_unloading(); - release(b); - return next; - } - // // Invalidate all dependencies in the context void DependencyContext::remove_all_dependents() { @@ -213,18 +207,6 @@ void DependencyContext::remove_all_dependents() { set_dependencies(nullptr); } -void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) { - nmethodBucket* b = dependencies_not_unloading(); - set_dependencies(nullptr); - while (b != nullptr) { - nmethod* nm = b->get_nmethod(); - // Also count already (concurrently) marked nmethods to make sure - // deoptimization is triggered before execution in this thread continues. - deopt_scope->mark(nm); - b = release_and_get_next_not_unloading(b); - } -} - #ifndef PRODUCT bool DependencyContext::is_empty() { return dependencies() == nullptr; diff --git a/src/hotspot/share/code/dependencyContext.hpp b/src/hotspot/share/code/dependencyContext.hpp index 13b845cb59dde..7e8f71635096e 100644 --- a/src/hotspot/share/code/dependencyContext.hpp +++ b/src/hotspot/share/code/dependencyContext.hpp @@ -63,7 +63,7 @@ class nmethodBucket: public CHeapObj { // // Utility class to manipulate nmethod dependency context. 
// Dependency context can be attached either to an InstanceKlass (_dep_context field) -// or CallSiteContext oop for call_site_target dependencies (see javaClasses.hpp). +// or CallSite oop for call_site_target dependencies (see javaClasses.hpp). // DependencyContext class operates on some location which holds a nmethodBucket* value // and uint64_t integer recording the safepoint counter at the last cleanup. // @@ -92,7 +92,6 @@ class DependencyContext : public StackObj { #ifdef ASSERT // Safepoints are forbidden during DC lifetime. GC can invalidate // _dependency_context_addr if it relocates the holder - // (e.g. CallSiteContext Java object). SafepointStateTracker _safepoint_tracker; DependencyContext(nmethodBucket* volatile* bucket_addr, volatile uint64_t* last_cleanup_addr) @@ -114,9 +113,7 @@ class DependencyContext : public StackObj { void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes); void add_dependent_nmethod(nmethod* nm); void remove_all_dependents(); - void remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope); void clean_unloading_dependents(); - static nmethodBucket* release_and_get_next_not_unloading(nmethodBucket* b); static void purge_dependency_contexts(); static void release(nmethodBucket* b); static void cleaning_start(); diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 6659ac90dd822..c2e2e2d607f27 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -365,13 +365,24 @@ void CompileQueue::free_all() { while (next != nullptr) { CompileTask* current = next; next = current->next(); + bool found_waiter = false; { - // Wake up thread that blocks on the compile task. 
MutexLocker ct_lock(current->lock()); - current->lock()->notify(); + assert(current->waiting_for_completion_count() <= 1, "more than one thread are waiting for task"); + if (current->waiting_for_completion_count() > 0) { + // If another thread waits for this task, we must wake them up + // so they will stop waiting and free the task. + current->lock()->notify(); + found_waiter = true; + } + } + if (!found_waiter) { + // If no one was waiting for this task, we need to free it ourselves. In this case, the task + // is also certainly unlocked, because, again, there is no waiter. + // Otherwise, by convention, it's the waiters responsibility to free the task. + // Put the task back on the freelist. + CompileTask::free(current); } - // Put the task back on the freelist. - CompileTask::free(current); } _first = nullptr; _last = nullptr; @@ -1722,9 +1733,11 @@ void CompileBroker::wait_for_completion(CompileTask* task) { { MonitorLocker ml(thread, task->lock()); free_task = true; + task->inc_waiting_for_completion(); while (!task->is_complete() && !is_compilation_disabled_forever()) { ml.wait(); } + task->dec_waiting_for_completion(); } if (free_task) { diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp index 2011fa656248b..e266a215de1b3 100644 --- a/src/hotspot/share/compiler/compileTask.cpp +++ b/src/hotspot/share/compiler/compileTask.cpp @@ -106,6 +106,8 @@ void CompileTask::initialize(int compile_id, _comp_level = comp_level; _num_inlined_bytecodes = 0; + _waiting_count = 0; + _is_complete = false; _is_success = false; @@ -282,21 +284,6 @@ void CompileTask::print_impl(outputStream* st, Method* method, int compile_id, i } } -void CompileTask::print_inline_indent(int inline_level, outputStream* st) { - // 1234567 - st->print(" "); // print timestamp - // 1234 - st->print(" "); // print compilation number - // %s!bn - st->print(" "); // print method attributes - if (TieredCompilation) { - st->print(" "); - } - st->print(" 
"); // more indent - st->print(" "); // initial inlining indent - for (int i = 0; i < inline_level; i++) st->print(" "); -} - // ------------------------------------------------------------------ // CompileTask::print_compilation void CompileTask::print(outputStream* st, const char* msg, bool short_form, bool cr) { @@ -410,45 +397,76 @@ bool CompileTask::check_break_at_flags() { // ------------------------------------------------------------------ // CompileTask::print_inlining void CompileTask::print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, InliningResult result, const char* msg) { + print_inlining_header(st, method, inline_level, bci); + print_inlining_inner_message(st, result, msg); + st->cr(); +} + +void CompileTask::print_inlining_header(outputStream* st, ciMethod* method, int inline_level, int bci) { // 1234567 - st->print(" "); // print timestamp + st->print(" "); // print timestamp // 1234 - st->print(" "); // print compilation number + st->print(" "); // print compilation number // method attributes if (method->is_loaded()) { - const char sync_char = method->is_synchronized() ? 's' : ' '; + const char sync_char = method->is_synchronized() ? 's' : ' '; const char exception_char = method->has_exception_handlers() ? '!' : ' '; - const char monitors_char = method->has_monitor_bytecodes() ? 'm' : ' '; + const char monitors_char = method->has_monitor_bytecodes() ? 
'm' : ' '; // print method attributes st->print(" %c%c%c ", sync_char, exception_char, monitors_char); } else { // %s!bn - st->print(" "); // print method attributes + st->print(" "); // print method attributes } if (TieredCompilation) { st->print(" "); } - st->print(" "); // more indent - st->print(" "); // initial inlining indent + st->print(" "); // more indent + st->print(" "); // initial inlining indent - for (int i = 0; i < inline_level; i++) st->print(" "); + for (int i = 0; i < inline_level; i++) { + st->print(" "); + } + + st->print("@ %d ", bci); // print bci + print_inline_inner_method_info(st, method); +} - st->print("@ %d ", bci); // print bci +void CompileTask::print_inline_inner_method_info(outputStream* st, ciMethod* method) { method->print_short_name(st); - if (method->is_loaded()) + if (method->is_loaded()) { st->print(" (%d bytes)", method->code_size()); - else + } else { st->print(" (not loaded)"); + } +} + +void CompileTask::print_inline_indent(int inline_level, outputStream* st) { + // 1234567 + st->print(" "); // print timestamp + // 1234 + st->print(" "); // print compilation number + // %s!bn + st->print(" "); // print method attributes + if (TieredCompilation) { + st->print(" "); + } + st->print(" "); // more indent + st->print(" "); // initial inlining indent + for (int i = 0; i < inline_level; i++) { + st->print(" "); + } +} +void CompileTask::print_inlining_inner_message(outputStream* st, InliningResult result, const char* msg) { if (msg != nullptr) { st->print(" %s%s", result == InliningResult::SUCCESS ? 
"" : "failed to inline: ", msg); } else if (result == InliningResult::FAILURE) { st->print(" %s", "failed to inline"); } - st->cr(); } void CompileTask::print_ul(const char* msg){ diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index 37459bd0ff52f..14591a3abdf0c 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -98,6 +98,7 @@ class CompileTask : public CHeapObj { // Compilation state for a blocking JVMCI compilation JVMCICompileState* _blocking_jvmci_compile_state; #endif + int _waiting_count; // See waiting_for_completion_count() int _comp_level; int _num_inlined_bytecodes; CompileTask* _next, *_prev; @@ -174,6 +175,23 @@ class CompileTask : public CHeapObj { Monitor* lock() const { return _lock; } + // See how many threads are waiting for this task. Must have lock to read this. + int waiting_for_completion_count() { + assert(_lock->owned_by_self(), "must have lock to use waiting_for_completion_count()"); + return _waiting_count; + } + // Indicates that a thread is waiting for this task to complete. Must have lock to use this. + void inc_waiting_for_completion() { + assert(_lock->owned_by_self(), "must have lock to use inc_waiting_for_completion()"); + _waiting_count++; + } + // Indicates that a thread stopped waiting for this task to complete. Must have lock to use this. + void dec_waiting_for_completion() { + assert(_lock->owned_by_self(), "must have lock to use dec_waiting_for_completion()"); + assert(_waiting_count > 0, "waiting count is not positive"); + _waiting_count--; + } + void mark_complete() { _is_complete = true; } void mark_success() { _is_success = true; } void mark_started(jlong time) { _time_started = time; } @@ -218,6 +236,9 @@ class CompileTask : public CHeapObj { } static void print_ul(const nmethod* nm, const char* msg = nullptr); + /** + * @deprecated Please rely on Compile::inline_printer. 
Do not directly write inlining information to tty. + */ static void print_inline_indent(int inline_level, outputStream* st = tty); void print_tty(); @@ -235,7 +256,11 @@ class CompileTask : public CHeapObj { bool check_break_at_flags(); + static void print_inlining_header(outputStream* st, ciMethod* method, int inline_level, int bci); static void print_inlining_inner(outputStream* st, ciMethod* method, int inline_level, int bci, InliningResult result, const char* msg = nullptr); + static void print_inline_inner_method_info(outputStream* st, ciMethod* method); + static void print_inlining_inner_message(outputStream* st, InliningResult result, const char* msg); + static void print_inlining_tty(ciMethod* method, int inline_level, int bci, InliningResult result, const char* msg = nullptr) { print_inlining_inner(tty, method, inline_level, bci, result, msg); } diff --git a/src/hotspot/share/nmt/nmtTreap.hpp b/src/hotspot/share/nmt/nmtTreap.hpp index e7cc91eefd9a1..7e4ad3df95bf1 100644 --- a/src/hotspot/share/nmt/nmtTreap.hpp +++ b/src/hotspot/share/nmt/nmtTreap.hpp @@ -66,6 +66,8 @@ class Treap { TreapNode* _right; public: + TreapNode(const K& k, uint64_t p) : _priority(p), _key(k), _left(nullptr), _right(nullptr) {} + TreapNode(const K& k, const V& v, uint64_t p) : _priority(p), _key(k), @@ -313,6 +315,30 @@ class Treap { return candidate; } + struct FindResult { + FindResult(TreapNode* node, bool new_node) : node(node), new_node(new_node) {} + TreapNode* const node; + bool const new_node; + }; + + // Finds the node for the given k in the tree or inserts a new node with the default constructed value. 
+ FindResult find(const K& k) { + if (TreapNode* found = find(_root, k)) { + return FindResult(found, false); + } + _node_count++; + // Doesn't exist, make node + void* node_place = _allocator.allocate(sizeof(TreapNode)); + uint64_t prio = prng_next(); + TreapNode* node = new (node_place) TreapNode(k, prio); + + // (LEQ_k, GT_k) + node_pair split_up = split(this->_root, k); + // merge(merge(LEQ_k, EQ_k), GT_k) + this->_root = merge(merge(split_up.left, node), split_up.right); + return FindResult(node, true); + } + TreapNode* closest_gt(const K& key) { TreapNode* candidate = nullptr; TreapNode* pos = _root; diff --git a/src/hotspot/share/opto/bytecodeInfo.cpp b/src/hotspot/share/opto/bytecodeInfo.cpp index 55c72a0c35a27..e618a708f618e 100644 --- a/src/hotspot/share/opto/bytecodeInfo.cpp +++ b/src/hotspot/share/opto/bytecodeInfo.cpp @@ -113,7 +113,8 @@ static bool is_unboxing_method(ciMethod* callee_method, Compile* C) { // positive filter: should callee be inlined? bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, - int caller_bci, bool& should_delay, ciCallProfile& profile) { + JVMState* caller_jvms, bool& should_delay, ciCallProfile& profile) { + int caller_bci = caller_jvms->bci(); // Allows targeted inlining if (C->directive()->should_inline(callee_method)) { set_msg("force inline by CompileCommand"); @@ -143,9 +144,9 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, // Check for too many throws (and not too huge) if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) { - if (C->print_inlining() && Verbose) { - CompileTask::print_inline_indent(inline_level()); - tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count()); + if (Verbose) { + outputStream* stream = C->inline_printer()->record(callee_method, caller_jvms, InliningResult::SUCCESS); + stream->print("Inlined method with many throws (throws=%d):", 
callee_method->interpreter_throwout_count()); } set_msg("many throws"); return true; @@ -168,11 +169,8 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, max_inline_size = C->freq_inline_size(); if (size <= max_inline_size && TraceFrequencyInlining) { - CompileTask::print_inline_indent(inline_level()); - tty->print_cr("Inlined frequent method (freq=%lf):", freq); - CompileTask::print_inline_indent(inline_level()); - callee_method->print(); - tty->cr(); + outputStream* stream = C->inline_printer()->record(callee_method, caller_jvms, InliningResult::SUCCESS); + stream->print("Inlined frequent method (freq=%lf):", freq); } } else { // Not hot. Check for medium-sized pre-existing nmethod at cold sites. @@ -376,7 +374,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, _forced_inline = false; // Reset // 'should_delay' can be overridden during replay compilation - if (!should_inline(callee_method, caller_method, caller_bci, should_delay, profile)) { + if (!should_inline(callee_method, caller_method, jvms, should_delay, profile)) { return false; } // 'should_delay' can be overridden during replay compilation @@ -534,8 +532,9 @@ const char* InlineTree::check_can_parse(ciMethod* callee) { } //------------------------------print_inlining--------------------------------- -void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, - ciMethod* caller_method, bool success) const { +void InlineTree::print_inlining(ciMethod* callee_method, JVMState* jvm, bool success) const { + int caller_bci = jvm->bci(); + ciMethod* caller_method = jvm->method(); const char* inline_msg = msg(); assert(inline_msg != nullptr, "just checking"); if (C->log() != nullptr) { @@ -545,19 +544,11 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, C->log()->inline_fail(inline_msg); } } - CompileTask::print_inlining_ul(callee_method, inline_level(), - caller_bci, inlining_result_of(success), 
inline_msg); - if (C->print_inlining()) { - C->print_inlining(callee_method, inline_level(), caller_bci, inlining_result_of(success), inline_msg); - guarantee(callee_method != nullptr, "would crash in CompilerEvent::InlineEvent::post"); - if (Verbose) { - const InlineTree *top = this; - while (top->caller_tree() != nullptr) { top = top->caller_tree(); } - //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); - } - } + CompileTask::print_inlining_ul(callee_method, inline_level(), caller_bci, inlining_result_of(success), inline_msg); + C->inline_printer()->record(callee_method, jvm, inlining_result_of(success), inline_msg); EventCompilerInlining event; if (event.should_commit()) { + guarantee(callee_method != nullptr, "would crash in CompilerEvent::InlineEvent::post"); CompilerEvent::InlineEvent::post(event, C->compile_id(), caller_method->get_Method(), callee_method, success, inline_msg, caller_bci); } } @@ -582,14 +573,14 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro // Do some initial checks. if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { set_msg("failed initial checks"); - print_inlining(callee_method, caller_bci, caller_method, false /* !success */); + print_inlining(callee_method, jvms, false /* !success */); return false; } // Do some parse checks. 
set_msg(check_can_parse(callee_method)); if (msg() != nullptr) { - print_inlining(callee_method, caller_bci, caller_method, false /* !success */); + print_inlining(callee_method, jvms, false /* !success */); return false; } @@ -601,7 +592,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro if (msg() == nullptr) { set_msg("inline (hot)"); } - print_inlining(callee_method, caller_bci, caller_method, true /* success */); + print_inlining(callee_method, jvms, true /* success */); InlineTree* callee_tree = build_inline_tree_for_callee(callee_method, jvms, caller_bci); if (should_delay) { // Record late inlining decision in order to dump it for compiler replay @@ -613,7 +604,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro if (msg() == nullptr) { set_msg("too cold to inline"); } - print_inlining(callee_method, caller_bci, caller_method, false /* !success */ ); + print_inlining(callee_method, jvms, false /* !success */); return false; } } @@ -634,8 +625,7 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implementation } if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) { - CompileTask::print_inline_indent(inline_level()); - tty->print_cr(" \\-> discounting inline depth"); + C->inline_printer()->record(callee_method, caller_jvms, InliningResult::SUCCESS, " \\-> discounting inline depth"); } if (max_inline_level_adjust != 0 && C->log()) { int id1 = C->log()->identify(caller_jvms->method()); diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp index 8ebb9f662c4f7..ec7117e3568ca 100644 --- a/src/hotspot/share/opto/callGenerator.cpp +++ b/src/hotspot/share/opto/callGenerator.cpp @@ -84,7 +84,6 @@ class ParseGenerator : public InlineCallGenerator { JVMState* ParseGenerator::generate(JVMState* jvms) { Compile* C 
= Compile::current(); - C->print_inlining_update(this); if (is_osr()) { // The JVMS for a OSR has a single argument (see its TypeFunc). @@ -143,7 +142,6 @@ class DirectCallGenerator : public CallGenerator { JVMState* DirectCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); - kit.C->print_inlining_update(this); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() : SharedRuntime::get_resolve_opt_virtual_call_stub(); @@ -218,8 +216,6 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); Node* receiver = kit.argument(0); - kit.C->print_inlining_update(this); - if (kit.C->log() != nullptr) { kit.C->log()->elem("virtual_call bci='%d'", jvms->bci()); } @@ -353,15 +349,6 @@ class LateInlineCallGenerator : public DirectCallGenerator { return DirectCallGenerator::generate(jvms); } - virtual void print_inlining_late(InliningResult result, const char* msg) { - CallNode* call = call_node(); - Compile* C = Compile::current(); - C->print_inlining_assert_ready(); - C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg); - C->print_inlining_move_to(this); - C->print_inlining_update_delayed(this); - } - virtual void set_unique_id(jlong id) { _unique_id = id; } @@ -431,9 +418,9 @@ bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place if (cg != nullptr) { - if (!allow_inline && (C->print_inlining() || C->print_intrinsics())) { - C->print_inlining(cg->method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE, - "late method handle call resolution"); + if (!allow_inline) { + C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, + "late method handle call resolution"); } assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || 
StressIncrementalInlining, "we're doing late inlining"); _inline_cg = cg; @@ -499,15 +486,6 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator { return new_jvms; } - virtual void print_inlining_late(InliningResult result, const char* msg) { - CallNode* call = call_node(); - Compile* C = Compile::current(); - C->print_inlining_assert_ready(); - C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), result, msg); - C->print_inlining_move_to(this); - C->print_inlining_update_delayed(this); - } - virtual void set_unique_id(jlong id) { _unique_id = id; } @@ -531,20 +509,16 @@ bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* Node* receiver = jvms->map()->argument(jvms, 0); const Type* recv_type = C->initial_gvn()->type(receiver); if (recv_type->maybe_null()) { - if (C->print_inlining() || C->print_intrinsics()) { - C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE, - "late call devirtualization failed (receiver may be null)"); - } + C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE, + "late call devirtualization failed (receiver may be null)"); return false; } // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call. bool allow_inline = C->inlining_incrementally(); if (!allow_inline && _callee->holder()->is_interface()) { // Don't convert the interface call to a direct call guarded by an interface subtype check. 
- if (C->print_inlining() || C->print_intrinsics()) { - C->print_inlining(method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE, - "late call devirtualization failed (interface call)"); - } + C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE, + "late call devirtualization failed (interface call)"); return false; } CallGenerator* cg = C->call_generator(_callee, @@ -557,9 +531,8 @@ bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* true /*allow_intrinsics*/); if (cg != nullptr) { - if (!allow_inline && (C->print_inlining() || C->print_intrinsics())) { - C->print_inlining(cg->method(), jvms->depth()-1, call_node()->jvms()->bci(), InliningResult::FAILURE, - "late call devirtualization"); + if (!allow_inline) { + C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization"); } assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining"); _inline_cg = cg; @@ -682,21 +655,13 @@ void CallGenerator::do_late_inline_helper() { map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1)); } - C->print_inlining_assert_ready(); - - C->print_inlining_move_to(this); - C->log_late_inline(this); // JVMState is ready, so time to perform some checks and prepare for inlining attempt. 
if (!do_late_inline_check(C, jvms)) { map->disconnect_inputs(C); - C->print_inlining_update_delayed(this); return; } - if (C->print_inlining() && (is_mh_late_inline() || is_virtual_late_inline())) { - C->print_inlining_update_delayed(this); - } // Setup default node notes to be picked up by the inlining Node_Notes* old_nn = C->node_notes_at(call->_idx); @@ -711,6 +676,18 @@ void CallGenerator::do_late_inline_helper() { if (new_jvms == nullptr) return; // no change if (C->failing()) return; + if (is_mh_late_inline()) { + C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)"); + } else if (is_string_late_inline()) { + C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)"); + } else if (is_boxing_late_inline()) { + C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)"); + } else if (is_vector_reboxing_late_inline()) { + C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)"); + } else { + C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded"); + } + // Capture any exceptional control flow GraphKit kit(new_jvms); @@ -782,6 +759,8 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator { return new_jvms; } + virtual bool is_boxing_late_inline() const { return true; } + virtual CallGenerator* with_call_node(CallNode* call) { LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg); cg->set_call_node(call->as_CallStaticJava()); @@ -810,6 +789,8 @@ class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator { return new_jvms; } + virtual bool is_vector_reboxing_late_inline() const { return true; } + virtual CallGenerator* with_call_node(CallNode* call) { LateInlineVectorReboxingCallGenerator* cg = new 
LateInlineVectorReboxingCallGenerator(method(), _inline_cg); cg->set_call_node(call->as_CallStaticJava()); @@ -875,7 +856,6 @@ CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver, JVMState* PredictedCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); - kit.C->print_inlining_update(this); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. // We share a map with the caller, so his JVMS gets adjusted. @@ -932,6 +912,9 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { // Make the hot call: JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); + if (kit.failing()) { + return nullptr; + } if (new_jvms == nullptr) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); @@ -1045,8 +1028,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* const int vtable_index = Method::invalid_vtable_index; if (!ciMethod::is_consistent_info(callee, target)) { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "signatures mismatch"); + print_inlining_failure(C, callee, jvms, "signatures mismatch"); return nullptr; } @@ -1059,15 +1041,12 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* } else { assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s", Type::str(receiver->bottom_type())); - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "receiver is always null"); + print_inlining_failure(C, callee, jvms, "receiver is always null"); } } else { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "receiver not constant"); + print_inlining_failure(C, callee, jvms, "receiver not constant"); } - } - break; + } break; case vmIntrinsics::_linkToVirtual: case vmIntrinsics::_linkToStatic: @@ -1082,8 +1061,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* ciMethod* target = 
oop_ptr->const_oop()->as_member_name()->get_vmtarget(); if (!ciMethod::is_consistent_info(callee, target)) { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "signatures mismatch"); + print_inlining_failure(C, callee, jvms, "signatures mismatch"); return nullptr; } @@ -1098,8 +1076,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* Node* recv = kit.argument(0); Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass()); if (casted_recv->is_top()) { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "argument types mismatch"); + print_inlining_failure(C, callee, jvms, "argument types mismatch"); return nullptr; // FIXME: effectively dead; issue a halt node instead } else if (casted_recv != recv) { kit.set_argument(0, casted_recv); @@ -1112,8 +1089,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* Node* arg = kit.argument(receiver_skip + j); Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass()); if (casted_arg->is_top()) { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "argument types mismatch"); + print_inlining_failure(C, callee, jvms, "argument types mismatch"); return nullptr; // FIXME: effectively dead; issue a halt node instead } else if (casted_arg != arg) { kit.set_argument(receiver_skip + j, casted_arg); @@ -1151,15 +1127,12 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* speculative_receiver_type); return cg; } else { - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "member_name not constant"); + print_inlining_failure(C, callee, jvms, "member_name not constant"); } - } - break; + } break; - case vmIntrinsics::_linkToNative: - print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), - "native call"); + case vmIntrinsics::_linkToNative: + print_inlining_failure(C, callee, jvms, "native call"); break; default: @@ -1259,6 
+1232,9 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { PreserveJVMState pjvms(&kit); // Generate intrinsic code: JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); + if (kit.failing()) { + return nullptr; + } if (new_jvms == nullptr) { // Intrinsic failed, use normal compilation path for this predicate. slow_region->add_req(kit.control()); @@ -1404,7 +1380,6 @@ CallGenerator::for_uncommon_trap(ciMethod* m, JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); - kit.C->print_inlining_update(this); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). // Callsite signature can be different from actual method being called (i.e _linkTo* sites). // Use callsite signature always. diff --git a/src/hotspot/share/opto/callGenerator.hpp b/src/hotspot/share/opto/callGenerator.hpp index 77182580207ca..82b195e0c7603 100644 --- a/src/hotspot/share/opto/callGenerator.hpp +++ b/src/hotspot/share/opto/callGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,6 +75,8 @@ class CallGenerator : public ArenaObj { // same but for method handle calls virtual bool is_mh_late_inline() const { return false; } virtual bool is_string_late_inline() const { return false; } + virtual bool is_boxing_late_inline() const { return false; } + virtual bool is_vector_reboxing_late_inline() const { return false; } virtual bool is_virtual_late_inline() const { return false; } // Replace the call with an inline version of the code @@ -171,28 +173,14 @@ class CallGenerator : public ArenaObj { CallGenerator* cg); virtual Node* generate_predicate(JVMState* jvms, int predicate) { return nullptr; }; - virtual void print_inlining_late(InliningResult result, const char* msg) { ShouldNotReachHere(); } - - static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) { - print_inlining_impl(C, callee, inline_level, bci, InliningResult::SUCCESS, msg); - } - - static void print_inlining_failure(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) { - print_inlining_impl(C, callee, inline_level, bci, InliningResult::FAILURE, msg); + static void print_inlining_failure(Compile* C, ciMethod* callee, JVMState* jvms, const char* msg) { + C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg); C->log_inline_failure(msg); } static bool is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m); static bool is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m); static bool is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m); - -private: - static void print_inlining_impl(Compile* C, ciMethod* callee, int inline_level, int bci, - InliningResult result, const char* msg) { - if (C->print_inlining()) { - C->print_inlining(callee, inline_level, bci, result, msg); - } - } }; diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 
a96735a15e70b..8fb1eda0001f5 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -610,77 +610,75 @@ void Compile::print_ideal_ir(const char* phase_name) { // the continuation bci for on stack replacement. -Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, - Options options, DirectiveSet* directive) - : Phase(Compiler), - _compile_id(ci_env->compile_id()), - _options(options), - _method(target), - _entry_bci(osr_bci), - _ilt(nullptr), - _stub_function(nullptr), - _stub_name(nullptr), - _stub_entry_point(nullptr), - _max_node_limit(MaxNodeLimit), - _post_loop_opts_phase(false), - _allow_macro_nodes(true), - _inlining_progress(false), - _inlining_incrementally(false), - _do_cleanup(false), - _has_reserved_stack_access(target->has_reserved_stack_access()), +Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci, + Options options, DirectiveSet* directive) + : Phase(Compiler), + _compile_id(ci_env->compile_id()), + _options(options), + _method(target), + _entry_bci(osr_bci), + _ilt(nullptr), + _stub_function(nullptr), + _stub_name(nullptr), + _stub_entry_point(nullptr), + _max_node_limit(MaxNodeLimit), + _post_loop_opts_phase(false), + _allow_macro_nodes(true), + _inlining_progress(false), + _inlining_incrementally(false), + _do_cleanup(false), + _has_reserved_stack_access(target->has_reserved_stack_access()), #ifndef PRODUCT - _igv_idx(0), - _trace_opto_output(directive->TraceOptoOutputOption), + _igv_idx(0), + _trace_opto_output(directive->TraceOptoOutputOption), #endif - _has_method_handle_invokes(false), - _clinit_barrier_on_entry(false), - _stress_seed(0), - _comp_arena(mtCompiler), - _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())), - _env(ci_env), - _directive(directive), - _log(ci_env->log()), - _first_failure_details(nullptr), - _intrinsics (comp_arena(), 0, 0, nullptr), - _macro_nodes (comp_arena(), 8, 0, nullptr), - _parse_predicates (comp_arena(), 
8, 0, nullptr), - _template_assertion_predicate_opaqs (comp_arena(), 8, 0, nullptr), - _expensive_nodes (comp_arena(), 8, 0, nullptr), - _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), - _unstable_if_traps (comp_arena(), 8, 0, nullptr), - _coarsened_locks (comp_arena(), 8, 0, nullptr), - _congraph(nullptr), - NOT_PRODUCT(_igv_printer(nullptr) COMMA) - _unique(0), - _dead_node_count(0), - _dead_node_list(comp_arena()), - _node_arena_one(mtCompiler, Arena::Tag::tag_node), - _node_arena_two(mtCompiler, Arena::Tag::tag_node), - _node_arena(&_node_arena_one), - _mach_constant_base_node(nullptr), - _Compile_types(mtCompiler), - _initial_gvn(nullptr), - _igvn_worklist(nullptr), - _types(nullptr), - _node_hash(nullptr), - _late_inlines(comp_arena(), 2, 0, nullptr), - _string_late_inlines(comp_arena(), 2, 0, nullptr), - _boxing_late_inlines(comp_arena(), 2, 0, nullptr), - _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr), - _late_inlines_pos(0), - _number_of_mh_late_inlines(0), - _oom(false), - _print_inlining_stream(new (mtCompiler) stringStream()), - _print_inlining_list(nullptr), - _print_inlining_idx(0), - _print_inlining_output(nullptr), - _replay_inline_data(nullptr), - _java_calls(0), - _inner_loops(0), - _interpreter_frame_size(0), - _output(nullptr) + _has_method_handle_invokes(false), + _clinit_barrier_on_entry(false), + _stress_seed(0), + _comp_arena(mtCompiler), + _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())), + _env(ci_env), + _directive(directive), + _log(ci_env->log()), + _first_failure_details(nullptr), + _intrinsics(comp_arena(), 0, 0, nullptr), + _macro_nodes(comp_arena(), 8, 0, nullptr), + _parse_predicates(comp_arena(), 8, 0, nullptr), + _template_assertion_predicate_opaqs(comp_arena(), 8, 0, nullptr), + _expensive_nodes(comp_arena(), 8, 0, nullptr), + _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), + _unstable_if_traps(comp_arena(), 8, 0, nullptr), + _coarsened_locks(comp_arena(), 8, 0, 
nullptr), + _congraph(nullptr), + NOT_PRODUCT(_igv_printer(nullptr) COMMA) + _unique(0), + _dead_node_count(0), + _dead_node_list(comp_arena()), + _node_arena_one(mtCompiler, Arena::Tag::tag_node), + _node_arena_two(mtCompiler, Arena::Tag::tag_node), + _node_arena(&_node_arena_one), + _mach_constant_base_node(nullptr), + _Compile_types(mtCompiler), + _initial_gvn(nullptr), + _igvn_worklist(nullptr), + _types(nullptr), + _node_hash(nullptr), + _late_inlines(comp_arena(), 2, 0, nullptr), + _string_late_inlines(comp_arena(), 2, 0, nullptr), + _boxing_late_inlines(comp_arena(), 2, 0, nullptr), + _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr), + _late_inlines_pos(0), + _number_of_mh_late_inlines(0), + _oom(false), + _replay_inline_data(nullptr), + _inline_printer(this), + _java_calls(0), + _inner_loops(0), + _interpreter_frame_size(0), + _output(nullptr) #ifndef PRODUCT - , _in_dump_cnt(0) + , + _in_dump_cnt(0) #endif { C = this; @@ -743,7 +741,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, PhaseGVN gvn; set_initial_gvn(&gvn); - print_inlining_init(); { // Scope for timing the parser TracePhase tp(_t_parser); @@ -886,71 +883,68 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, //------------------------------Compile---------------------------------------- // Compile a runtime stub -Compile::Compile( ciEnv* ci_env, - TypeFunc_generator generator, - address stub_function, - const char *stub_name, - int is_fancy_jump, - bool pass_tls, - bool return_pc, - DirectiveSet* directive) - : Phase(Compiler), - _compile_id(0), - _options(Options::for_runtime_stub()), - _method(nullptr), - _entry_bci(InvocationEntryBci), - _stub_function(stub_function), - _stub_name(stub_name), - _stub_entry_point(nullptr), - _max_node_limit(MaxNodeLimit), - _post_loop_opts_phase(false), - _allow_macro_nodes(true), - _inlining_progress(false), - _inlining_incrementally(false), - _has_reserved_stack_access(false), +Compile::Compile(ciEnv* ci_env, + 
TypeFunc_generator generator, + address stub_function, + const char* stub_name, + int is_fancy_jump, + bool pass_tls, + bool return_pc, + DirectiveSet* directive) + : Phase(Compiler), + _compile_id(0), + _options(Options::for_runtime_stub()), + _method(nullptr), + _entry_bci(InvocationEntryBci), + _stub_function(stub_function), + _stub_name(stub_name), + _stub_entry_point(nullptr), + _max_node_limit(MaxNodeLimit), + _post_loop_opts_phase(false), + _allow_macro_nodes(true), + _inlining_progress(false), + _inlining_incrementally(false), + _has_reserved_stack_access(false), #ifndef PRODUCT - _igv_idx(0), - _trace_opto_output(directive->TraceOptoOutputOption), + _igv_idx(0), + _trace_opto_output(directive->TraceOptoOutputOption), #endif - _has_method_handle_invokes(false), - _clinit_barrier_on_entry(false), - _stress_seed(0), - _comp_arena(mtCompiler), - _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())), - _env(ci_env), - _directive(directive), - _log(ci_env->log()), - _first_failure_details(nullptr), - _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), - _congraph(nullptr), - NOT_PRODUCT(_igv_printer(nullptr) COMMA) - _unique(0), - _dead_node_count(0), - _dead_node_list(comp_arena()), - _node_arena_one(mtCompiler), - _node_arena_two(mtCompiler), - _node_arena(&_node_arena_one), - _mach_constant_base_node(nullptr), - _Compile_types(mtCompiler), - _initial_gvn(nullptr), - _igvn_worklist(nullptr), - _types(nullptr), - _node_hash(nullptr), - _number_of_mh_late_inlines(0), - _oom(false), - _print_inlining_stream(new (mtCompiler) stringStream()), - _print_inlining_list(nullptr), - _print_inlining_idx(0), - _print_inlining_output(nullptr), - _replay_inline_data(nullptr), - _java_calls(0), - _inner_loops(0), - _interpreter_frame_size(0), - _output(nullptr), + _has_method_handle_invokes(false), + _clinit_barrier_on_entry(false), + _stress_seed(0), + _comp_arena(mtCompiler), + 
_barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())), + _env(ci_env), + _directive(directive), + _log(ci_env->log()), + _first_failure_details(nullptr), + _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), + _congraph(nullptr), + NOT_PRODUCT(_igv_printer(nullptr) COMMA) + _unique(0), + _dead_node_count(0), + _dead_node_list(comp_arena()), + _node_arena_one(mtCompiler), + _node_arena_two(mtCompiler), + _node_arena(&_node_arena_one), + _mach_constant_base_node(nullptr), + _Compile_types(mtCompiler), + _initial_gvn(nullptr), + _igvn_worklist(nullptr), + _types(nullptr), + _node_hash(nullptr), + _number_of_mh_late_inlines(0), + _oom(false), + _replay_inline_data(nullptr), + _inline_printer(this), + _java_calls(0), + _inner_loops(0), + _interpreter_frame_size(0), + _output(nullptr), #ifndef PRODUCT - _in_dump_cnt(0), + _in_dump_cnt(0), #endif - _allowed_reasons(0) { + _allowed_reasons(0) { C = this; TraceTime t1(nullptr, &_t_totalCompilation, CITime, false); @@ -991,7 +985,6 @@ Compile::Compile( ciEnv* ci_env, } Compile::~Compile() { - delete _print_inlining_stream; delete _first_failure_details; }; @@ -2112,7 +2105,7 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) { CallGenerator* cg = _late_inlines.at(i); const char* msg = "live nodes > LiveNodeCountInliningCutoff"; if (do_print_inlining) { - cg->print_inlining_late(InliningResult::FAILURE, msg); + inline_printer()->record(cg->method(), cg->call_node()->jvms(), InliningResult::FAILURE, msg); } log_late_inline_failure(cg, msg); } @@ -2232,8 +2225,6 @@ void Compile::Optimize() { ResourceMark rm; - print_inlining_reinit(); - NOT_PRODUCT( verify_graph_edges(); ) print_method(PHASE_AFTER_PARSING, 1); @@ -2484,8 +2475,6 @@ void Compile::Optimize() { check_no_dead_use(); - process_print_inlining(); - // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have // to remove hashes to unlock nodes for modifications. 
C->node_hash()->clear(); @@ -4439,126 +4428,8 @@ Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* return phase->transform(new ConvI2LNode(value, ltype)); } -// The message about the current inlining is accumulated in -// _print_inlining_stream and transferred into the _print_inlining_list -// once we know whether inlining succeeds or not. For regular -// inlining, messages are appended to the buffer pointed by -// _print_inlining_idx in the _print_inlining_list. For late inlining, -// a new buffer is added after _print_inlining_idx in the list. This -// way we can update the inlining message for late inlining call site -// when the inlining is attempted again. -void Compile::print_inlining_init() { - if (print_inlining() || print_intrinsics()) { - // print_inlining_init is actually called several times. - print_inlining_reset(); - _print_inlining_list = new (comp_arena())GrowableArray(comp_arena(), 1, 1, new PrintInliningBuffer()); - } -} - -void Compile::print_inlining_reinit() { - if (print_inlining() || print_intrinsics()) { - print_inlining_reset(); - } -} - -void Compile::print_inlining_reset() { - _print_inlining_stream->reset(); -} - -void Compile::print_inlining_commit() { - assert(print_inlining() || print_intrinsics(), "PrintInlining off?"); - // Transfer the message from _print_inlining_stream to the current - // _print_inlining_list buffer and clear _print_inlining_stream. 
- _print_inlining_list->at(_print_inlining_idx)->ss()->write(_print_inlining_stream->base(), _print_inlining_stream->size()); - print_inlining_reset(); -} - -void Compile::print_inlining_push() { - // Add new buffer to the _print_inlining_list at current position - _print_inlining_idx++; - _print_inlining_list->insert_before(_print_inlining_idx, new PrintInliningBuffer()); -} - -Compile::PrintInliningBuffer* Compile::print_inlining_current() { - return _print_inlining_list->at(_print_inlining_idx); -} - -void Compile::print_inlining_update(CallGenerator* cg) { - if (print_inlining() || print_intrinsics()) { - if (cg->is_late_inline()) { - if (print_inlining_current()->cg() != cg && - (print_inlining_current()->cg() != nullptr || - print_inlining_current()->ss()->size() != 0)) { - print_inlining_push(); - } - print_inlining_commit(); - print_inlining_current()->set_cg(cg); - } else { - if (print_inlining_current()->cg() != nullptr) { - print_inlining_push(); - } - print_inlining_commit(); - } - } -} - -void Compile::print_inlining_move_to(CallGenerator* cg) { - // We resume inlining at a late inlining call site. Locate the - // corresponding inlining buffer so that we can update it. 
- if (print_inlining() || print_intrinsics()) { - for (int i = 0; i < _print_inlining_list->length(); i++) { - if (_print_inlining_list->at(i)->cg() == cg) { - _print_inlining_idx = i; - return; - } - } - ShouldNotReachHere(); - } -} - -void Compile::print_inlining_update_delayed(CallGenerator* cg) { - if (print_inlining() || print_intrinsics()) { - assert(_print_inlining_stream->size() > 0, "missing inlining msg"); - assert(print_inlining_current()->cg() == cg, "wrong entry"); - // replace message with new message - _print_inlining_list->at_put(_print_inlining_idx, new PrintInliningBuffer()); - print_inlining_commit(); - print_inlining_current()->set_cg(cg); - } -} - -void Compile::print_inlining_assert_ready() { - assert(!_print_inlining || _print_inlining_stream->size() == 0, "losing data"); -} - -void Compile::process_print_inlining() { - assert(_late_inlines.length() == 0, "not drained yet"); - if (print_inlining() || print_intrinsics()) { - ResourceMark rm; - stringStream ss; - assert(_print_inlining_list != nullptr, "process_print_inlining should be called only once."); - for (int i = 0; i < _print_inlining_list->length(); i++) { - PrintInliningBuffer* pib = _print_inlining_list->at(i); - ss.print("%s", pib->ss()->freeze()); - delete pib; - DEBUG_ONLY(_print_inlining_list->at_put(i, nullptr)); - } - // Reset _print_inlining_list, it only contains destructed objects. - // It is on the arena, so it will be freed when the arena is reset. - _print_inlining_list = nullptr; - // _print_inlining_stream won't be used anymore, either. 
- print_inlining_reset(); - size_t end = ss.size(); - _print_inlining_output = NEW_ARENA_ARRAY(comp_arena(), char, end+1); - strncpy(_print_inlining_output, ss.freeze(), end+1); - _print_inlining_output[end] = 0; - } -} - void Compile::dump_print_inlining() { - if (_print_inlining_output != nullptr) { - tty->print_raw(_print_inlining_output); - } + inline_printer()->print_on(tty); } void Compile::log_late_inline(CallGenerator* cg) { diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index 223e703376103..9325289120704 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -46,6 +46,7 @@ #include "runtime/vmThread.hpp" #include "utilities/ticks.hpp" #include "utilities/vmEnums.hpp" +#include "opto/printinlining.hpp" class AbstractLockNode; class AddPNode; @@ -472,29 +473,6 @@ class Compile : public Phase { // "MemLimit" directive was specified and the memory limit was hit during compilation bool _oom; - // Inlining may not happen in parse order which would make - // PrintInlining output confusing. Keep track of PrintInlining - // pieces in order. 
- class PrintInliningBuffer : public CHeapObj { - private: - CallGenerator* _cg; - stringStream _ss; - static const size_t default_stream_buffer_size = 128; - - public: - PrintInliningBuffer() - : _cg(nullptr), _ss(default_stream_buffer_size) {} - - stringStream* ss() { return &_ss; } - CallGenerator* cg() { return _cg; } - void set_cg(CallGenerator* cg) { _cg = cg; } - }; - - stringStream* _print_inlining_stream; - GrowableArray* _print_inlining_list; - int _print_inlining_idx; - char* _print_inlining_output; - // Only keep nodes in the expensive node list that need to be optimized void cleanup_expensive_nodes(PhaseIterGVN &igvn); // Use for sorting expensive nodes to bring similar nodes together @@ -506,37 +484,17 @@ class Compile : public Phase { void* _replay_inline_data; // Pointer to data loaded from file - void print_inlining_init(); - void print_inlining_reinit(); - void print_inlining_commit(); - void print_inlining_push(); - PrintInliningBuffer* print_inlining_current(); - void log_late_inline_failure(CallGenerator* cg, const char* msg); DEBUG_ONLY(bool _exception_backedge;) void record_method_not_compilable_oom(); - public: + InlinePrinter _inline_printer; +public: void* barrier_set_state() const { return _barrier_set_state; } - stringStream* print_inlining_stream() { - assert(print_inlining() || print_intrinsics(), "PrintInlining off?"); - return _print_inlining_stream; - } - - void print_inlining_update(CallGenerator* cg); - void print_inlining_update_delayed(CallGenerator* cg); - void print_inlining_move_to(CallGenerator* cg); - void print_inlining_assert_ready(); - void print_inlining_reset(); - - void print_inlining(ciMethod* method, int inline_level, int bci, InliningResult result, const char* msg = nullptr) { - stringStream ss; - CompileTask::print_inlining_inner(&ss, method, inline_level, bci, result, msg); - print_inlining_stream()->print("%s", ss.freeze()); - } + InlinePrinter* inline_printer() { return &_inline_printer; } #ifndef PRODUCT 
IdealGraphPrinter* igv_printer() { return _igv_printer; } @@ -1100,7 +1058,6 @@ class Compile : public Phase { void remove_useless_coarsened_locks(Unique_Node_List& useful); - void process_print_inlining(); void dump_print_inlining(); bool over_inlining_cutoff() const { diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 68a799fc6f355..736ed4e676d1e 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -49,33 +49,40 @@ #include "jfr/jfr.hpp" #endif -static void print_trace_type_profile(outputStream* out, int depth, ciKlass* prof_klass, int site_count, int receiver_count) { - CompileTask::print_inline_indent(depth, out); +static void print_trace_type_profile(outputStream* out, int depth, ciKlass* prof_klass, int site_count, int receiver_count, + bool with_deco) { + if (with_deco) { + CompileTask::print_inline_indent(depth, out); + } out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count); prof_klass->name()->print_symbol_on(out); - out->cr(); + if (with_deco) { + out->cr(); + } } -static void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method, - ciKlass* prof_klass, int site_count, int receiver_count) { +static void trace_type_profile(Compile* C, ciMethod* method, JVMState* jvms, + ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) { + int depth = jvms->depth() - 1; + int bci = jvms->bci(); if (TraceTypeProfile || C->print_inlining()) { - outputStream* out = tty; if (!C->print_inlining()) { if (!PrintOpto && !PrintCompilation) { method->print_short_name(); tty->cr(); } CompileTask::print_inlining_tty(prof_method, depth, bci, InliningResult::SUCCESS); + print_trace_type_profile(tty, depth, prof_klass, site_count, receiver_count, true); } else { - out = C->print_inlining_stream(); + auto stream = C->inline_printer()->record(method, jvms, InliningResult::SUCCESS); + print_trace_type_profile(stream, depth, 
prof_klass, site_count, receiver_count, false); } - print_trace_type_profile(out, depth, prof_klass, site_count, receiver_count); } LogTarget(Debug, jit, inlining) lt; if (lt.is_enabled()) { LogStream ls(lt); - print_trace_type_profile(&ls, depth, prof_klass, site_count, receiver_count); + print_trace_type_profile(&ls, depth, prof_klass, site_count, receiver_count, true); } } @@ -294,17 +301,19 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool if (miss_cg != nullptr) { if (next_hit_cg != nullptr) { assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation"); - trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)); + trace_type_profile(C, jvms->method(), jvms, next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)); // We don't need to record dependency on a receiver here and below. // Whenever we inline, the dependency is added by Parse::Parse(). miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX); } if (miss_cg != nullptr) { ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0); - trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count); + trace_type_profile(C, jvms->method(), jvms, receiver_method, k, site_count, receiver_count); float hit_prob = speculative_receiver_type != nullptr ? 1.0 : profile.receiver_prob(0); CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob); - if (cg != nullptr) return cg; + if (cg != nullptr) { + return cg; + } } } } @@ -371,9 +380,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // Use a more generic tactic, like a simple call. 
if (call_does_dispatch) { const char* msg = "virtual call"; - if (C->print_inlining()) { - print_inlining(callee, jvms->depth() - 1, jvms->bci(), InliningResult::FAILURE, msg); - } + C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg); C->log_inline_failure(msg); if (IncrementalInlineVirtual && allow_inline) { return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later @@ -512,8 +519,6 @@ void Parse::do_call() { // our contribution to it is cleaned up right here. kill_dead_locals(); - C->print_inlining_assert_ready(); - // Set frequently used booleans const bool is_virtual = bc() == Bytecodes::_invokevirtual; const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 9ed7bea750de7..7ef030ccd7d4d 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -119,9 +119,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { const char *inline_msg = is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"; CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg); - if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg); - } + C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg); C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); if (C->log()) { C->log()->elem("intrinsic id='%s'%s nodes='%d'", @@ -131,7 +129,6 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { } // Push the result from the inlined method onto the stack. 
kit.push_result(); - C->print_inlining_update(this); return kit.transfer_exceptions_into_jvms(); } @@ -147,9 +144,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { : "failed to inline (intrinsic), method not annotated"; } CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::FAILURE, msg); - if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining(callee, jvms->depth() - 1, bci, InliningResult::FAILURE, msg); - } + C->inline_printer()->record(callee, jvms, InliningResult::FAILURE, msg); } else { // Root compile ResourceMark rm; @@ -164,7 +159,6 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { } } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); - C->print_inlining_update(this); return nullptr; } @@ -190,9 +184,8 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { const char *inline_msg = is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)"; CompileTask::print_inlining_ul(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg); - if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining(callee, jvms->depth() - 1, bci, InliningResult::SUCCESS, inline_msg); - } + C->inline_printer()->record(callee, jvms, InliningResult::SUCCESS, inline_msg); + C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); if (C->log()) { C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'", @@ -208,9 +201,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { // Not a root compile. 
const char* msg = "failed to generate predicate for intrinsic"; CompileTask::print_inlining_ul(kit.callee(), jvms->depth() - 1, bci, InliningResult::FAILURE, msg); - if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining(kit.callee(), jvms->depth() - 1, bci, InliningResult::FAILURE, msg); - } + C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg); } else { // Root compile ResourceMark rm; @@ -220,9 +211,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { is_virtual() ? " (virtual)" : "", bci); const char *msg = msg_stream.freeze(); log_debug(jit, inlining)("%s", msg); - if (C->print_intrinsics() || C->print_inlining()) { - C->print_inlining_stream()->print("%s", msg); - } + C->inline_printer()->record(kit.callee(), jvms, InliningResult::FAILURE, msg); } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); return nullptr; @@ -3772,6 +3761,20 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { Node* test_pin_count_over_underflow = _gvn.transform(new BoolNode(pin_count_cmp, BoolTest::eq)); IfNode* iff_pin_count_over_underflow = create_and_map_if(control(), test_pin_count_over_underflow, PROB_MIN, COUNT_UNKNOWN); + // True branch, pin count over/underflow. + Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow)); + { + // Trap (but not deoptimize (Action_none)) and continue in the interpreter + // which will throw IllegalStateException for pin count over/underflow. + // No memory changed so far - we can use memory create by reset_memory() + // at the beginning of this intrinsic. No need to call reset_memory() again. + PreserveJVMState pjvms(this); + set_control(pin_count_over_underflow); + uncommon_trap(Deoptimization::Reason_intrinsic, + Deoptimization::Action_none); + assert(stopped(), "invariant"); + } + // False branch, no pin count over/underflow. Increment or decrement pin count and store back. 
Node* valid_pin_count = _gvn.transform(new IfFalseNode(iff_pin_count_over_underflow)); set_control(valid_pin_count); @@ -3783,20 +3786,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { next_pin_count = _gvn.transform(new AddINode(pin_count, _gvn.intcon(1))); } - Node* updated_pin_count_memory = store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered); - - // True branch, pin count over/underflow. - Node* pin_count_over_underflow = _gvn.transform(new IfTrueNode(iff_pin_count_over_underflow)); - { - // Trap (but not deoptimize (Action_none)) and continue in the interpreter - // which will throw IllegalStateException for pin count over/underflow. - PreserveJVMState pjvms(this); - set_control(pin_count_over_underflow); - set_all_memory(input_memory_state); - uncommon_trap_exact(Deoptimization::Reason_intrinsic, - Deoptimization::Action_none); - assert(stopped(), "invariant"); - } + store_to_memory(control(), pin_count_offset, next_pin_count, T_INT, MemNode::unordered); // Result of top level CFG and Memory. RegionNode* result_rgn = new RegionNode(PATH_LIMIT); @@ -3806,7 +3796,7 @@ bool LibraryCallKit::inline_native_Continuation_pinning(bool unpin) { result_rgn->init_req(_true_path, _gvn.transform(valid_pin_count)); result_rgn->init_req(_false_path, _gvn.transform(continuation_is_null)); - result_mem->init_req(_true_path, _gvn.transform(updated_pin_count_memory)); + result_mem->init_req(_true_path, _gvn.transform(reset_memory())); result_mem->init_req(_false_path, _gvn.transform(input_memory_state)); // Set output state. @@ -4302,7 +4292,12 @@ Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, if (obj != nullptr && is_array_ctrl != nullptr && is_array_ctrl != top()) { // Keep track of the fact that 'obj' is an array to prevent // array specific accesses from floating above the guard. 
- *obj = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM)); + Node* cast = _gvn.transform(new CastPPNode(is_array_ctrl, *obj, TypeAryPtr::BOTTOM)); + // Check for top because in rare cases, the type system can determine that + // the object can't be an array but the layout helper check is not folded. + if (!cast->is_top()) { + *obj = cast; + } } return ctrl; } diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp index c3edebd367bf2..df9c858c5deac 100644 --- a/src/hotspot/share/opto/library_call.hpp +++ b/src/hotspot/share/opto/library_call.hpp @@ -106,6 +106,10 @@ class LibraryCallKit : public GraphKit { void push_result() { // Push the result onto the stack. if (!stopped() && result() != nullptr) { + if (result()->is_top()) { + assert(false, "Can't determine return value."); + C->record_method_not_compilable("Can't determine return value."); + } BasicType bt = result()->bottom_type()->basic_type(); push_node(bt, result()); } diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index 89e7ead2c7619..583d0ba7601e3 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -547,10 +547,6 @@ Node *Node::clone() const { if (cg != nullptr) { CallGenerator* cloned_cg = cg->with_call_node(n->as_Call()); n->as_Call()->set_generator(cloned_cg); - - C->print_inlining_assert_ready(); - C->print_inlining_move_to(cg); - C->print_inlining_update(cloned_cg); } } if (n->is_SafePoint()) { diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index 579e0a5321196..83b211828ce5c 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -75,7 +75,7 @@ class InlineTree : public AnyObj { bool& should_delay); bool should_inline(ciMethod* callee_method, ciMethod* caller_method, - int caller_bci, + JVMState* caller_jvms, bool& should_delay, ciCallProfile& profile); bool should_not_inline(ciMethod* callee_method, @@ -87,8 
+87,7 @@ class InlineTree : public AnyObj { ciMethod* caller_method, int caller_bci, ciCallProfile& profile); - void print_inlining(ciMethod* callee_method, int caller_bci, - ciMethod* caller_method, bool success) const; + void print_inlining(ciMethod* callee_method, JVMState* jvm, bool success) const; InlineTree* caller_tree() const { return _caller_tree; } InlineTree* callee_at(int bci, ciMethod* m) const; diff --git a/src/hotspot/share/opto/printinlining.cpp b/src/hotspot/share/opto/printinlining.cpp new file mode 100644 index 0000000000000..accc3dcc63769 --- /dev/null +++ b/src/hotspot/share/opto/printinlining.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "opto/printinlining.hpp" +#include "opto/callnode.hpp" +#include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" + +bool InlinePrinter::is_enabled() const { + return C->print_intrinsics() || C->print_inlining(); +} + +outputStream* InlinePrinter::record(ciMethod* callee, JVMState* state, InliningResult result, const char* msg) { + if (!is_enabled()) { + return &_nullStream; + } + outputStream* stream = locate(state, callee)->add(result); + if (msg != nullptr) { + stream->print("%s", msg); + } + return stream; // Pointer stays valid, see IPInlineSite::add() +} + +void InlinePrinter::print_on(outputStream* tty) const { + if (!is_enabled()) { + return; + } + _root.dump(tty, -1); +} + +InlinePrinter::IPInlineSite* InlinePrinter::locate(JVMState* state, ciMethod* callee) { + auto growableArray = new GrowableArrayCHeap(2); + + while (state != nullptr) { + growableArray->push(state); + state = state->caller(); + } + + IPInlineSite* site = &_root; + for (int i = growableArray->length() - 1; i >= 0; i--) { + site = &site->at_bci(growableArray->at(i)->bci(), i == 0 ? 
callee : nullptr); + } + + delete growableArray; + + return site; +} + +InlinePrinter::IPInlineSite& InlinePrinter::IPInlineSite::at_bci(int bci, ciMethod* callee) { + auto find_result = _children.find(bci); + IPInlineSite& child = find_result.node->val(); + + if (find_result.new_node) { + assert(callee != nullptr, "an inline call is missing in the chain up to the root"); + child.set_source(callee, bci); + } else { // We already saw a call at this site before + if (callee != nullptr && callee != child._method) { + outputStream* stream = child.add(InliningResult::SUCCESS); + stream->print("callee changed to "); + CompileTask::print_inline_inner_method_info(stream, callee); + } + } + + return child; +} + +outputStream* InlinePrinter::IPInlineSite::add(InliningResult result) { + _attempts.push(IPInlineAttempt(result)); + return _attempts.last().make_stream(); +} + +void InlinePrinter::IPInlineSite::dump(outputStream* tty, int level) const { + assert(_bci != -999, "trying to dump site without source"); + + if (_attempts.is_nonempty()) { + CompileTask::print_inlining_header(tty, _method, level, _bci); + } + for (int i = 0; i < _attempts.length(); i++) { + CompileTask::print_inlining_inner_message(tty, _attempts.at(i).result(), _attempts.at(i).stream()->base()); + } + if (_attempts.is_nonempty()) { + tty->cr(); + } + + _children.visit_in_order([=](auto* node) { + node->val().dump(tty, level + 1); + }); +} diff --git a/src/hotspot/share/opto/printinlining.hpp b/src/hotspot/share/opto/printinlining.hpp new file mode 100644 index 0000000000000..ae79648319b43 --- /dev/null +++ b/src/hotspot/share/opto/printinlining.hpp @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef PRINTINLINING_HPP +#define PRINTINLINING_HPP + +#include "memory/allocation.hpp" +#include "utilities/ostream.hpp" +#include "utilities/growableArray.hpp" +#include "nmt/nmtTreap.hpp" + +class JVMState; +class ciMethod; +class Compile; +enum class InliningResult; + +// If not enabled, all method calls are no-ops. 
+class InlinePrinter { +private: + class IPInlineAttempt { + InliningResult _result; + stringStream* _stream; + + public: + IPInlineAttempt() : _stream(nullptr) {} + + IPInlineAttempt(InliningResult result) : _result(result), _stream(nullptr) {} + + InliningResult result() const { return _result; } + + stringStream* make_stream() { + assert(_stream == nullptr, "stream already exists"); + _stream = new (mtCompiler) stringStream; + return _stream; + } + + stringStream* stream() const { + assert(_stream != nullptr, "stream was not created yet!"); + return _stream; + } + + void deallocate_stream() { + delete _stream; + _stream = nullptr; + } + }; + + struct Cmp { + static int cmp(int a, int b) { + return a - b; + } + }; + + class IPInlineSite : public CHeapObj { + private: + ciMethod* _method; + int _bci; + GrowableArrayCHeap _attempts; + TreapCHeap _children; + + public: + IPInlineSite(ciMethod* method, int bci) : _method(method), _bci(bci) {} + + IPInlineSite() : _method(nullptr), _bci(-999) {} + + ~IPInlineSite() { + // Since GrowableArrayCHeap uses copy semantics to resize itself we + // cannot free the stream inside IPInlineAttempt's destructor unfortunately + // and have to take care of this here instead. + for (int i = 0; i < _attempts.length(); i++) { + _attempts.at(i).deallocate_stream(); + } + } + + void set_source(ciMethod* method, int bci) { + _method = method; + _bci = bci; + } + + // Finds the node for an inline attempt that occurred inside this inline. + // If this is a new site, provide the callee otherwise null. + // Returned reference is valid until any at_bci is called with non-null callee. + IPInlineSite& at_bci(int bci, ciMethod* callee); + // The returned pointer stays valid until InlinePrinter is destructed. 
+ outputStream* add(InliningResult result); + + void dump(outputStream* tty, int level) const; + }; + + bool is_enabled() const; + + Compile* C; + + // In case print inline is disabled, this null stream is returned from ::record() + nullStream _nullStream; + + // Locates the IPInlineSite node that corresponds to this JVM state. + // state may be null. In this case, the root node is returned. + // If this is a new site, provide the callee otherwise null. + // Returned pointer is valid until InlinePrinter is destructed. + IPInlineSite* locate(JVMState* state, ciMethod* callee); + + IPInlineSite _root{nullptr, 0}; + +public: + InlinePrinter(Compile* compile) : C(compile) {} + + // Saves the result of an inline attempt of method at state. + // An optional string message with more details that is copied to the stream for this attempt. Pointer is not captured. + // Returns an output stream which stores the message associated with this attempt. The buffer stays valid until InlinePrinter is destructed. + // You can print arbitrary information to this stream but do not add line breaks, as this will break formatting. + outputStream* record(ciMethod* callee, JVMState* state, InliningResult result, const char* msg = nullptr); + + // Prints all collected inlining information to the given output stream. 
+ void print_on(outputStream* tty) const; +}; + +#endif // PRINTINLINING_HPP diff --git a/src/hotspot/share/opto/stringopts.cpp b/src/hotspot/share/opto/stringopts.cpp index f867fcf4d234c..793145e078d6d 100644 --- a/src/hotspot/share/opto/stringopts.cpp +++ b/src/hotspot/share/opto/stringopts.cpp @@ -173,9 +173,6 @@ class StringConcat : public ResourceObj { assert(!_control.contains(ctrl), "only push once"); _control.push(ctrl); } - bool has_control(Node* ctrl) { - return _control.contains(ctrl); - } void add_constructor(Node* init) { assert(!_constructors.contains(init), "only push once"); _constructors.push(init); @@ -410,66 +407,7 @@ Node_List PhaseStringOpts::collect_toString_calls() { return string_calls; } -PhaseStringOpts::ProcessAppendResult PhaseStringOpts::process_append_candidate(CallStaticJavaNode* cnode, - StringConcat* sc, - ciMethod* m, - ciSymbol* string_sig, - ciSymbol* int_sig, - ciSymbol* char_sig) { - if (cnode->method() != nullptr && !cnode->method()->is_static() && - cnode->method()->holder() == m->holder() && - cnode->method()->name() == ciSymbols::append_name() && - (cnode->method()->signature()->as_symbol() == string_sig || - cnode->method()->signature()->as_symbol() == char_sig || - cnode->method()->signature()->as_symbol() == int_sig)) { - if (sc->has_control(cnode)) { - return ProcessAppendResult::AppendWasAdded; - } - sc->add_control(cnode); - Node* arg = cnode->in(TypeFunc::Parms + 1); - if (arg == nullptr || arg->is_top()) { -#ifndef PRODUCT - if (PrintOptimizeStringConcat) { - tty->print("giving up because the call is effectively dead"); - cnode->jvms()->dump_spec(tty); - tty->cr(); - } -#endif - return ProcessAppendResult::AbortOptimization; - } - - if (cnode->method()->signature()->as_symbol() == int_sig) { - sc->push_int(arg); - } else if (cnode->method()->signature()->as_symbol() == char_sig) { - sc->push_char(arg); - } else if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) { - CallStaticJavaNode* csj = 
arg->in(0)->as_CallStaticJava(); - if (csj->method() != nullptr && - csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString && - arg->outcnt() == 1) { - // _control is the list of StringBuilder calls nodes which - // will be replaced by new String code after this optimization. - // Integer::toString() call is not part of StringBuilder calls - // chain. It could be eliminated only if its result is used - // only by this SB calls chain. - // Another limitation: it should be used only once because - // it is unknown that it is used only by this SB calls chain - // until all related SB calls nodes are collected. - assert(arg->unique_out() == cnode, "sanity"); - sc->add_control(csj); - sc->push_int(csj->in(TypeFunc::Parms)); - } else { - sc->push_string(arg); - } - } else { - sc->push_string(arg); - } - return ProcessAppendResult::AppendWasAdded; - } - return ProcessAppendResult::CandidateIsNotAppend; -} - -// Recognize fluent-chain and non-fluent uses of StringBuilder/Buffer. They are either explicit usages +// Recognize a fluent-chain of StringBuilder/Buffer. They are either explicit usages // of them or the legacy bytecodes of string concatenation prior to JEP-280. eg. // // String result = new StringBuilder() @@ -478,17 +416,18 @@ PhaseStringOpts::ProcessAppendResult PhaseStringOpts::process_append_candidate(C // .append(123) // .toString(); // "foobar123" // -// Fluent-chains are recognized by walking upwards along the receivers, starting from toString(). -// Once the allocation of the StringBuilder has been reached, DU pairs are examined to find the -// constructor and non-fluent uses of the StringBuilder such as in this example: +// PS: Only a certain subset of constructor and append methods are acceptable. +// The criterion is that the length of argument is easy to work out in this phrase. +// It will drop complex cases such as Object. 
// +// Since it walks along the receivers of fluent-chain, it will give up if the codeshape is +// not "fluent" enough. eg. // StringBuilder sb = new StringBuilder(); // sb.append("foo"); // sb.toString(); // -// PS: Only a certain subset of constructor and append methods are acceptable. -// The criterion is that the length of argument is easy to work out in this phrase. -// It will drop complex cases such as Object. +// The receiver of toString method is the result of Allocation Node(CheckCastPP). +// The append method is overlooked. It will fail at validate_control_flow() test. // StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { ciMethod* m = call->method(); @@ -527,7 +466,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { if (cnode == nullptr) { alloc = recv->isa_Allocate(); if (alloc == nullptr) { - return nullptr; + break; } // Find the constructor call Node* result = alloc->result_cast(); @@ -539,7 +478,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { alloc->jvms()->dump_spec(tty); tty->cr(); } #endif - return nullptr; + break; } Node* constructor = nullptr; for (SimpleDUIterator i(result); i.has_next(); i.next()) { @@ -550,10 +489,6 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { use->method()->name() == ciSymbols::object_initializer_name() && use->method()->holder() == m->holder()) { // Matched the constructor. - if (constructor != nullptr) { - // The constructor again. We must only process it once. 
- continue; - } ciSymbol* sig = use->method()->signature()->as_symbol(); if (sig == ciSymbols::void_method_signature() || sig == ciSymbols::int_void_signature() || @@ -607,16 +542,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { } #endif } - } else if (use != nullptr) { - if (process_append_candidate(use, sc, m, string_sig, int_sig, char_sig) == ProcessAppendResult::AbortOptimization) { - // We must abort if process_append_candidate tells us to... - return nullptr; - } - // ...but we do not care if we really found an append or not: - // - If we found an append, that's perfect. Nothing further to do. - // - If this is a call to an unrelated method, validate_mem_flow() (and validate_control_flow()) - // will later check if this call prevents the optimization. So nothing to do here. - // We will continue to look for the constructor (if not found already) and appends. + break; } } if (constructor == nullptr) { @@ -627,7 +553,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { alloc->jvms()->dump_spec(tty); tty->cr(); } #endif - return nullptr; + break; } // Walked all the way back and found the constructor call so see @@ -642,23 +568,62 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { } else { return nullptr; } - } else { - ProcessAppendResult result = process_append_candidate(cnode, sc, m, string_sig, int_sig, char_sig); - - if (result == ProcessAppendResult::AbortOptimization) { - return nullptr; - } else if (result == ProcessAppendResult::CandidateIsNotAppend) { - // some unhandled signature + } else if (cnode->method() == nullptr) { + break; + } else if (!cnode->method()->is_static() && + cnode->method()->holder() == m->holder() && + cnode->method()->name() == ciSymbols::append_name() && + (cnode->method()->signature()->as_symbol() == string_sig || + cnode->method()->signature()->as_symbol() == char_sig || + cnode->method()->signature()->as_symbol() == int_sig)) { + 
sc->add_control(cnode); + Node* arg = cnode->in(TypeFunc::Parms + 1); + if (arg == nullptr || arg->is_top()) { #ifndef PRODUCT if (PrintOptimizeStringConcat) { - tty->print("giving up because encountered unexpected signature "); - cnode->tf()->dump(); - tty->cr(); - cnode->in(TypeFunc::Parms + 1)->dump(); + tty->print("giving up because the call is effectively dead"); + cnode->jvms()->dump_spec(tty); tty->cr(); } #endif - return nullptr; + break; } + if (cnode->method()->signature()->as_symbol() == int_sig) { + sc->push_int(arg); + } else if (cnode->method()->signature()->as_symbol() == char_sig) { + sc->push_char(arg); + } else { + if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) { + CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava(); + if (csj->method() != nullptr && + csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString && + arg->outcnt() == 1) { + // _control is the list of StringBuilder calls nodes which + // will be replaced by new String code after this optimization. + // Integer::toString() call is not part of StringBuilder calls + // chain. It could be eliminated only if its result is used + // only by this SB calls chain. + // Another limitation: it should be used only once because + // it is unknown that it is used only by this SB calls chain + // until all related SB calls nodes are collected. 
+ assert(arg->unique_out() == cnode, "sanity"); + sc->add_control(csj); + sc->push_int(csj->in(TypeFunc::Parms)); + continue; + } + } + sc->push_string(arg); + } + continue; + } else { + // some unhandled signature +#ifndef PRODUCT + if (PrintOptimizeStringConcat) { + tty->print("giving up because encountered unexpected signature "); + cnode->tf()->dump(); tty->cr(); + cnode->in(TypeFunc::Parms + 1)->dump(); + } +#endif + break; } } return nullptr; diff --git a/src/hotspot/share/opto/stringopts.hpp b/src/hotspot/share/opto/stringopts.hpp index 99d554838d795..21be4109c7d0d 100644 --- a/src/hotspot/share/opto/stringopts.hpp +++ b/src/hotspot/share/opto/stringopts.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ class IdealVariable; class PhaseStringOpts : public Phase { friend class StringConcat; -private: + private: PhaseGVN* _gvn; // List of dead nodes to clean up aggressively at the end @@ -53,23 +53,6 @@ class PhaseStringOpts : public Phase { // a single string construction. StringConcat* build_candidate(CallStaticJavaNode* call); - enum class ProcessAppendResult { - // Indicates that the candidate was indeed an append and process_append_candidate processed it - // accordingly (added it to the StringConcat etc.) - AppendWasAdded, - // The candidate turned out not to be an append call. process_append_candidate did not do anything. - CandidateIsNotAppend, - // The candidate is an append call, but circumstances completely preventing string concat - // optimization were detected and the optimization must abort. - AbortOptimization - }; - - // Called from build_candidate. Looks at an "append candidate", a call that might be a call - // to StringBuilder::append. 
If so, adds it to the StringConcat. - ProcessAppendResult process_append_candidate(CallStaticJavaNode* cnode, StringConcat* sc, - ciMethod* m, ciSymbol* string_sig, ciSymbol* int_sig, - ciSymbol* char_sig); - // Replace all the SB calls in concat with an optimization String allocation void replace_string_concat(StringConcat* concat); @@ -122,13 +105,12 @@ class PhaseStringOpts : public Phase { unroll_string_copy_length = 6 }; -public: + public: PhaseStringOpts(PhaseGVN* gvn); #ifndef PRODUCT static void print_statistics(); - -private: + private: static uint _stropts_replaced; static uint _stropts_merged; static uint _stropts_total; diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index 97e3eae1a2f0e..246e2cdbb132d 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -931,19 +931,12 @@ void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) { void MethodHandles::add_dependent_nmethod(oop call_site, nmethod* nm) { assert_locked_or_safepoint(CodeCache_lock); - oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); - // Try to purge stale entries on updates. - // Since GC doesn't clean dependency contexts rooted at CallSiteContext objects, - // in order to avoid memory leak, stale entries are purged whenever a dependency list - // is changed (both on addition and removal). Though memory reclamation is delayed, - // it avoids indefinite memory usage growth. 
+ DependencyContext deps = java_lang_invoke_CallSite::vmdependencies(call_site); deps.add_dependent_nmethod(nm); } void MethodHandles::clean_dependency_context(oop call_site) { - oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); + DependencyContext deps = java_lang_invoke_CallSite::vmdependencies(call_site); deps.clean_unloading_dependents(); } @@ -955,8 +948,7 @@ void MethodHandles::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Ha NoSafepointVerifier nsv; MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); - oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site()); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); + DependencyContext deps = java_lang_invoke_CallSite::vmdependencies(call_site()); deps.mark_dependent_nmethods(deopt_scope, changes); } } @@ -1321,23 +1313,6 @@ JVM_ENTRY(void, MHN_copyOutBootstrapArguments(JNIEnv* env, jobject igcls, } JVM_END -// It is called by a Cleaner object which ensures that dropped CallSites properly -// deallocate their dependency information. -JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) { - Handle context(THREAD, JNIHandles::resolve_non_null(context_jh)); - DeoptimizationScope deopt_scope; - { - NoSafepointVerifier nsv; - MutexLocker ml(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context()); - deps.remove_and_mark_for_deoptimization_all_dependents(&deopt_scope); - // This is assumed to be an 'atomic' operation by verification. - // So keep it under lock for now. - deopt_scope.deoptimize_marked(); - } -} -JVM_END - /** * Throws a java/lang/UnsupportedOperationException unconditionally. 
* This is required by the specification of MethodHandle.invoke if @@ -1384,7 +1359,6 @@ JVM_END #define MT JLINV "MethodType;" #define MH JLINV "MethodHandle;" #define MEM JLINV "MemberName;" -#define CTX JLINV "MethodHandleNatives$CallSiteContext;" #define CC (char*) /*cast a literal from (const char*)*/ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) @@ -1400,7 +1374,6 @@ static JNINativeMethod MHN_methods[] = { {CC "setCallSiteTargetNormal", CC "(" CS "" MH ")V", FN_PTR(MHN_setCallSiteTargetNormal)}, {CC "setCallSiteTargetVolatile", CC "(" CS "" MH ")V", FN_PTR(MHN_setCallSiteTargetVolatile)}, {CC "copyOutBootstrapArguments", CC "(" CLS "[III[" OBJ "IZ" OBJ ")V", FN_PTR(MHN_copyOutBootstrapArguments)}, - {CC "clearCallSiteContext", CC "(" CTX ")V", FN_PTR(MHN_clearCallSiteContext)}, {CC "staticFieldOffset", CC "(" MEM ")J", FN_PTR(MHN_staticFieldOffset)}, {CC "staticFieldBase", CC "(" MEM ")" OBJ, FN_PTR(MHN_staticFieldBase)}, {CC "getMemberVMInfo", CC "(" MEM ")" OBJ, FN_PTR(MHN_getMemberVMInfo)} diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp index d3dbe4337db3e..eafd2ad572e31 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.cpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp @@ -30,6 +30,7 @@ #include "prims/forte.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/stubRoutines.hpp" // Implementation of StubCodeDesc @@ -68,6 +69,13 @@ void StubCodeDesc::print() const { print_on(tty); } StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) { _masm = new MacroAssembler(code); + _blob_id = StubGenBlobId::NO_BLOBID; + _print_code = PrintStubCode || print_code; +} + +StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, StubGenBlobId blob_id, bool print_code) { + _masm = new MacroAssembler(code); + _blob_id = blob_id; _print_code = PrintStubCode || print_code; } @@ -110,6 +118,11 @@ void 
StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) { } } +#ifdef ASSERT +void StubCodeGenerator::verify_stub(StubGenStubId stub_id) { + assert(StubRoutines::stub_to_blob(stub_id) == blob_id(), "wrong blob %s for generation of stub %s", StubRoutines::get_blob_name(blob_id()), StubRoutines::get_stub_name(stub_id)); +} +#endif // Implementation of CodeMark @@ -121,6 +134,12 @@ StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const cha _cdesc->set_begin(_cgen->assembler()->pc()); } +StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, StubGenStubId stub_id) : StubCodeMark(cgen, "StubRoutines", StubRoutines::get_stub_name(stub_id)) { +#ifdef ASSERT + cgen->verify_stub(stub_id); +#endif +} + StubCodeMark::~StubCodeMark() { _cgen->assembler()->flush(); _cdesc->set_end(_cgen->assembler()->pc()); diff --git a/src/hotspot/share/runtime/stubCodeGenerator.hpp b/src/hotspot/share/runtime/stubCodeGenerator.hpp index c085e9fc38cc4..41bd7e49b31e7 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.hpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,42 +96,37 @@ class StubCodeDesc: public CHeapObj { void print() const; }; +// forward declare blob and stub id enums + +enum StubGenBlobId : int; +enum StubGenStubId : int; + // The base class for all stub-generating code generators. // Provides utility functions. 
class StubCodeGenerator: public StackObj { private: bool _print_code; - + StubGenBlobId _blob_id; protected: MacroAssembler* _masm; public: StubCodeGenerator(CodeBuffer* code, bool print_code = false); + StubCodeGenerator(CodeBuffer* code, StubGenBlobId blob_id, bool print_code = false); ~StubCodeGenerator(); MacroAssembler* assembler() const { return _masm; } + StubGenBlobId blob_id() { return _blob_id; } virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor virtual void stub_epilog(StubCodeDesc* cdesc); // called by StubCodeMark destructor - enum StubsKind { - Initial_stubs, // Stubs used by Runtime, Interpreter and compiled code. - // Have to be generated very early during VM startup. - - Continuation_stubs, // Stubs used by virtual threads. - // Generated after GC barriers initialization but before - // Interpreter initialization. - - Compiler_stubs, // Intrinsics and other stubs used only by compiled code. - // Can be generated by compiler (C2/JVMCI) thread based on - // DelayCompilerStubsGeneration flag. - - Final_stubs // The rest of stubs. Generated at the end of VM init. - }; +#ifdef ASSERT + void verify_stub(StubGenStubId stub_id); +#endif }; - // Stack-allocated helper class used to associate a stub code with a name. // All stub code generating functions that use a StubCodeMark will be registered // in the global StubCodeDesc list and the generated stub code can be identified @@ -144,6 +139,7 @@ class StubCodeMark: public StackObj { public: StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name); + StubCodeMark(StubCodeGenerator* cgen, StubGenStubId stub_id); ~StubCodeMark(); }; diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp index ccca14c61b5b8..05f27fd078a1d 100644 --- a/src/hotspot/share/runtime/stubDeclarations.hpp +++ b/src/hotspot/share/runtime/stubDeclarations.hpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. 
All rights reserved. - * Copyright (c) 2024, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,8 @@ #define SHARED_JFR_STUBS_DO(do_blob) #endif +// client macro to operate on shared stubs +// // do_blob(name, type) #define SHARED_STUBS_DO(do_blob) \ do_blob(deopt, DeoptimizationBlob*) \ @@ -73,6 +75,8 @@ // C1 stubs are always generated in a generic CodeBlob #ifdef COMPILER1 +// client macro to operate on c1 stubs +// // do_blob(name) #define C1_STUBS_DO(do_blob) \ do_blob(dtrace_object_alloc) \ @@ -135,6 +139,8 @@ #define C2_JVMTI_STUBS_DO(do_jvmti_stub) #endif // INCLUDE_JVMTI +// client macro to operate on c2 stubs +// // do_blob(name, type) // do_stub(name, fancy_jump, pass_tls, return_pc) // do_jvmti_stub(name) @@ -165,17 +171,897 @@ #define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) #endif -// generate a stub or blob id enum tag from a name +// Stub Generator Blobs and Stubs Overview +// +// StubGenerator stubs do not require their own individual blob. They +// are generated in batches into one of four distinct BufferBlobs: +// +// 1) Initial stubs +// 2) Continuation stubs +// 3) Compiler stubs +// 4) Final stubs +// +// Creation of each successive BufferBlobs is staged to ensure that +// specific VM subsystems required by those stubs are suitably +// initialized before generated code attempt to reference data or +// addresses exported by those subsystems. The sequencing of +// initialization must be taken into account when adding a new stub +// declaration. +// +// StubGenerator stubs are declared using template macros, one set of +// declarations per blob (see below), with arch-specific stubs for any +// gven blob declared after generic stubs for that blob. 
Blobs are
+// created in a fixed order during startup, which is reflected in the
+// order of the declaration set. Stubs within a blob are currently
+// created in an order determined by the arch-specific generator code
+// which may not reflect the order of stub declarations. It is not
+// straightforward to enforce a strict ordering, not least because
+// arch-specific stub creation may need to be interleaved with generic
+// stub creation.
+//
+// Blob and stub declaration templates are used to generate a variety
+// of C++ code elements needed to manage stubs.
+//
+// Blob identifiers:
+//
+// public enum StubGenBlobId is generated to identify each of the
+// StubGenerator blobs in blob declaration order. This enum is
+// provided for use by client code to identify a specific blob. For a
+// blob declared with name the associated enum value is
+// StubGenBlobId::_id.
+//
+// Global stub identifiers:
+//
+// public enum StubGenStubId is generated to identify all declared
+// stubs across all blobs, sorted first by blob declaration order and
+// then within a blob by stub declaration order, generic stubs before
+// arch-specific stubs. This enum is provided for use by client code
+// to identify a specific stub, independent of the blob it belongs to.
+// For a stub declared with name the associated enum value
+// is StubGenStubId::_id.
+//
+// Blob-local stub identifiers:
+//
+// For each blob , public enum StubGenStubId_ is
+// generated to enumerate all stubs within the blob in stub
+// declaration order, generic stubs before arch-specific stubs. This
+// enum is provided only in a non-product build and is intended for
+// internal use by class StubRoutines to validate stub declarations.
+// For a stub declared with name belonging to blob
+// the associated enum value is
+// StubGenStubId::__id.
+// +// Stub names and associated getters: +// +// Two private static fields are generated to hold the names of the +// four generated blobs and all the generated stubs. +// +// const char* StubRoutines::_blob_names[]; +// const char* StubRoutines::_stub_names[]; +// +// The entry in _blob_names for a blob declared with name +// will be "". +// +// The entry in _stub_names for a stub declared with name +// will be "". +// +// Corresponding public static lookup methods are generated to allow +// names to be looked up by blob or global stub id. +// +// const char* StubRoutines::get_blob_name(StubGenBlobId id) +// const char* StubRoutines::get_stub_name(StubGenStubId id) +// +// These name lookup methods should be used by generic and +// cpu-specific client code to ensure that blobs and stubs are +// identified consistently. +// +// Blob code buffer sizes: +// +// An enumeration enum platform_dependent_constants is generated in +// the architecture specific StubRoutines header. For each blob named +// an associated enum tag is generated which defines the +// relevant size +// +// __stubs_code_size = , +// +// For example, +// +// enum platform_dependent_constants { +// _initial_stubs_code_size = 10000, +// _continuation_stubs_code_size = 2000, +// . . . +// +// Blob fields and associated getters: +// +// For each blob named a private field declaration will be +// generated: static field address StubRoutines::__stubs_code and +// a declaration provided to initialise it to nullptr. A corresponding +// public getter method address StubRoutines::__stubs_code() will +// be generated. +// +// Blob initialization routines: +// +// For each blob named an initalization function is defined +// which allows clients to schedule blob and stub generation during +// JVM bootstrap: +// +// void _stubs_init() { StubRoutines::initialize__stubs(); } +// +// A declaration and definition of each underlying implementation +// method StubRoutines::initialize__stubs() is also generated. 
+// +// Stub entry points and associated getters: +// +// Some generated stubs require their main entry point and, possibly, +// auxiliary entry points to be stored in fields declared either as +// members of class SharedRuntime. For stubs that are specific to a +// given cpu, the field needs to be declared in an arch-specific inner +// class of SharedRuntime. +// +// For a generic stub named the corresponding main entry usually +// has the same name: static field address StubRoutines::_ modulo +// an _ prefix. An associated getter method is also generated, again +// normally using the same name: address StubRoutines::() e.g. +// +// class StubRoutines { +// . . . +// static address _aescrypt_encryptBlock; +// . . . +// address aescrypt_encryptBlock() { return _aescrypt_encryptBlock; } +// +// Multiple fields and getters may be generated where a stub has more +// than one entry point, each provided with their own unique field and +// getter name e.g. +// +// . . . +// static address _call_stub; +// static address _call_stub_return_address; +// . . . +// static address call_stub_entry() { return _call_stub; } +// static address call_stub_return_address() { return _call_stub_return_address; } +// +// In special cases a stub may declare a (compile-time) fixed size +// array of entries, in which case an address array field is +// generated,along with a getter that accepts an index as argument: +// +// . . . +// static address _lookup_secondary_supers_table[Klass::SECONDARY_SUPERS_TABLE_SIZE]; +// . . . +// static address lookup_secondary_supers_table(int i); +// +// CPU-specific stub entry points and associated getters: +// +// For an arch-specific stub with name belonging to architecture +// private field address StubRoutines::::_ is +// generated to hold the entry address. An associated public getter +// method address StubRoutines::::() is also generated e.g. +// +// class StubRoutines { +// . . . +// class x86 { +// . . . +// static address _f2i_fixup; +// . . . 
+// static address f2i_fixup() { return _f2i_fixup; }
+// static void set_f2i_fixup(address a) { _f2i_fixup = a; }
+//
+
+
+//--------------------------------------------------
+// Stub Generator Blob, Stub and Entry Declarations
+// -------------------------------------------------
+//
+// The formal declarations of blobs, stubs and entries provided below
+// are used to schedule application of template macros that either
+// declare or define the C++ code we need to manage those blobs, stubs
+// and entries.
+//
+// All ports employ the same blobs. However, the organization of the
+// stubs and entry points in a blob can vary from one port to the
+// next. A template macro is provided to specify the details of each
+// blob, including generic and arch-specific variations.
+//
+// If you want to define a new stub or entry then you can do so by
+// adding suitable declarations within the scope of the relevant blob.
+// For the blob with name BLOB_NAME add your declarations to macro
+// STUBGEN__STUBS_DO. Generic stubs and entries are
+// declared using the do_stub, do_entry and do_entry_init and
+// array_entry templates (see below for full details). The do_blob
+// and end_blob templates should never need to be modified.
+//
+// Some stubs and their associated entries are architecture-specific.
+// They need to be declared in the architecture-specific header file
+// src/cpu/stubDeclarations_.hpp. For the blob with name
+// BLOB_NAME the corresponding declarations are provided by macro
+// STUBGEN__STUBS_ARCH_DO. Arch-specific stubs and entries
+// are declared using the do_stub, do_arch_entry and
+// do_arch_entry_init templates (see below for details). An
+// architecture also needs to specify architecture parameters used when
+// creating each blob. These are defined using the do_arch_blob
+// template (see below).
+//
+// Note, the client macro STUBGEN_ALL_DO is provided to allow client
+// code to iterate over all blob, stub or entry declarations. It has
+// only been split into separate per-blob generic submacros,
+// STUBGEN__BLOBS_DO and arch-specific per-blob submacros
+// STUBGEN__BLOBS_ARCH_DO for convenience, to make it
+// easier to manage definitions. The blob_specific sub-macros should
+// not be called directly by client code (in class StubRoutines and
+// StubGenerator).
+//
+// A client wishing to generate blob, stub or entry code elements is
+// expected to pass template macros as arguments to STUBGEN_ALL_DO.
+// This will schedule code generation for whatever C++ code
+// elements are required to implement a declaration or definition
+// relevant to each blob, stub or entry. Alternatively, a client can
+// operate on a subset of the declarations by calling macros
+// STUBGEN_BLOBS_DO, STUBGEN_STUBS_DO, STUBGEN_BLOBS_STUBS_DO,
+// STUBGEN_ENTRIES_DO and STUBGEN_ARCH_ENTRIES_DO.
+//
+// The do_blob and end_blob templates receive a blob name as argument.
+//
+// do_blob(blob_name)
+// end_blob(blob_name)
+//
+// do_blob is primarily used to define a global enum tag for a blob
+// and an associated constant string name, both for use by client
+// code.
+//
+// end_blob is provided for use in combination with do_blob to open
+// and close a blob-local enum type identifying all stubs within a
+// given blob. This enum is private to the stub management code and
+// used to validate correct use of stubs within a given blob.
+//
+// The do_stub template receives a blob name and stub name as argument.
+//
+// do_stub(blob_name, stub_name)
+//
+// do_stub is primarily used to define a global enum tag for a stub
+// and a constant string name, both for use by client code. It is also
+// used to declare a tag within the blob-local enum type used to
+// validate correct use of stubs within their declared blob. Finally,
+// it is also used to declare a name for each stub.
+//
+// The do_entry and do_entry_array templates receive 4 or 5 arguments
+//
+// do_entry(blob_name, stub_name, field_name, getter_name)
+//
+// do_entry_init(blob_name, stub_name, field_name, getter_name, init_function)
+//
+// do_entry_array(blob_name, stub_name, field_name, getter_name, count)
+//
+// do_entry is used to declare or define a static field of class
+// StubRoutines with type address that stores a specific entry point
+// for a given stub. n.b. the number of entries associated with a stub
+// is often one but it can be more than one and, in a few special
+// cases, it is zero. do_entry is also used to declare and define an
+// associated getter method for the field. do_entry is used to declare
+// fields that should be initialized to nullptr.
+//
+// do_entry_init is used when the field needs to be initialized to a
+// specific function or method.
+//
+// do_entry_array is used for the special case where a stub employs an
+// array to store multiple entries which are stored at generate time
+// and subsequently accessed using an associated index (e.g. the
+// secondary supers table stub which has 63 associated entries).
+// Note that this is distinct from the case where a stub generates
+// multiple entries each of them stored in its own named field with
+// its own named getter. In the latter case multiple do_entry or
+// do_entry_init declarations are associated with the stub.
+//
+// blob_name and stub_name are the names of the blob and stub to which
+// the entry belongs.
+//
+// field_name is prefixed with a leading '_' to produce the name of
+// the field used to store an entry address for the stub. For stubs
+// with one entry field_name is normally, but not always, the same as
+// stub_name. Obviously when a stub has multiple entries secondary
+// names must be different to stub_name. For normal entry declarations
+// the field type is address. For do_entry_array declarations the field
+// type is an address[] whose size is defined by the count parameter.
+//
+// getter_name is the name of a getter that is generated to allow
+// access to the field. It is normally, but not always, the same as
+// stub_name. For normal entry declarations the getter signature is
+// (void). For do_entry_array declarations the getter signature is
+// (int).
+//
+// init_function is the name of a function or method which should be
+// assigned to the field as a default value (n.b. fields declared
+// using do_entry are initialized to nullptr, array fields declared
+// using do_entry_array have their elements initialized to nullptr).
+//
+// Architecture-specific blob details need to be specified using the
+// do_arch_blob template
+//
+// do_arch_blob(blob_name, size)
+//
+// Currently, the do_arch_blob macro is only used to define the size
+// of the code buffer into which blob-specific stub code is to be
+// generated.
+//
+// Architecture-specific entries need to be declared using the
+// do_arch_entry template
+//
+// do_arch_entry(arch, blob_name, stub_name, field_name, getter_name)
+//
+// do_arch_entry_init(arch, blob_name, stub_name, field_name,
+// getter_name, init_function)
+//
+// The only difference between these templates and the generic ones is
+// that they receive an extra argument which identifies the current
+// architecture e.g. x86, aarch64 etc.
+//
+// Currently there is no support for a do_arch_array_entry template.
+
+// Include arch-specific stub and entry declarations and make sure the
+// relevant template macros have been defined
+
+#include CPU_HEADER(stubDeclarations)
+
+#ifndef STUBGEN_INITIAL_BLOBS_ARCH_DO
+#error "Arch-specific directory failed to declare required initial stubs and entries"
+#endif
+
+#ifndef STUBGEN_CONTINUATION_BLOBS_ARCH_DO
+#error "Arch-specific directory failed to declare required continuation stubs and entries"
+#endif
+
+#ifndef STUBGEN_COMPILER_BLOBS_ARCH_DO
+#error "Arch-specific directory failed to declare required compiler stubs and entries"
+#endif
+
+#ifndef STUBGEN_FINAL_BLOBS_ARCH_DO
+#error "Arch-specific directory failed to declare required final stubs and entries"
+#endif
+
+// Iterator macros to apply templates to all relevant blobs, stubs and
+// entries. Clients should use STUBGEN_ALL_DO, STUBGEN_BLOBS_DO,
+// STUBGEN_STUBS_DO, STUBGEN_BLOBS_STUBS_DO, STUBGEN_ENTRIES_DO,
+// STUBGEN_ARCH_BLOBS_DO and STUBGEN_ARCH_ENTRIES_DO.
+//
+// n.b. Client macros appear after the STUBGEN__BLOBS_DO
+// submacros which follow next. These submacros are not intended to be
+// called directly. They serve to define the main client macro
+// STUBGEN_ALL_DO and, from there, the other more specific client
+// macros. n.b. multiple, 'per-blob' submacros are used to declare
+// each group of stubs and entries, because that makes it simpler to
+// look up and update related elements. If you need to update these
+// submacros to change the list of stubs or entries be sure to locate
+// stubs within the correct blob and locate entry declarations
+// immediately after their associated stub declaration.
+ +#define STUBGEN_INITIAL_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + do_blob(initial) \ + do_stub(initial, call_stub) \ + do_entry(initial, call_stub, call_stub_entry, call_stub_entry) \ + do_entry(initial, call_stub, call_stub_return_address, \ + call_stub_return_address) \ + do_stub(initial, forward_exception) \ + do_entry(initial, forward_exception, forward_exception_entry, \ + forward_exception_entry) \ + do_stub(initial, catch_exception) \ + do_entry(initial, catch_exception, catch_exception_entry, \ + catch_exception_entry) \ + do_stub(initial, fence) \ + do_entry(initial, fence, fence_entry, fence_entry) \ + do_stub(initial, atomic_xchg) \ + do_entry(initial, atomic_xchg, atomic_xchg_entry, atomic_xchg_entry) \ + do_stub(initial, atomic_cmpxchg) \ + do_entry(initial, atomic_cmpxchg, atomic_cmpxchg_entry, \ + atomic_cmpxchg_entry) \ + do_stub(initial, atomic_cmpxchg_long) \ + do_entry(initial, atomic_cmpxchg_long, atomic_cmpxchg_long_entry, \ + atomic_cmpxchg_long_entry) \ + do_stub(initial, updateBytesCRC32) \ + do_entry(initial, updateBytesCRC32, updateBytesCRC32, \ + updateBytesCRC32) \ + do_entry(initial, updateBytesCRC32, crc_table_adr, crc_table_addr) \ + do_stub(initial, updateBytesCRC32C) \ + do_entry(initial, updateBytesCRC32C, updateBytesCRC32C, \ + updateBytesCRC32C) \ + do_entry(initial, updateBytesCRC32C, crc32c_table_addr, \ + crc32c_table_addr) \ + do_stub(initial, f2hf) \ + do_entry(initial, f2hf, f2hf, f2hf_adr) \ + do_stub(initial, hf2f) \ + do_entry(initial, hf2f, hf2f, hf2f_adr) \ + do_stub(initial, dexp) \ + do_entry(initial, dexp, dexp, dexp) \ + do_stub(initial, dlog) \ + do_entry(initial, dlog, dlog, dlog) \ + do_stub(initial, dlog10) \ + do_entry(initial, dlog10, dlog10, dlog10) \ + do_stub(initial, dpow) \ + do_entry(initial, dpow, dpow, dpow) \ + do_stub(initial, dsin) \ + do_entry(initial, dsin, dsin, dsin) \ + 
do_stub(initial, dcos) \ + do_entry(initial, dcos, dcos, dcos) \ + do_stub(initial, dtan) \ + do_entry(initial, dtan, dtan, dtan) \ + do_stub(initial, dtanh) \ + do_entry(initial, dtanh, dtanh, dtanh) \ + do_stub(initial, fmod) \ + do_entry(initial, fmod, fmod, fmod) \ + /* following generic entries should really be x86_32 only */ \ + do_stub(initial, dlibm_sin_cos_huge) \ + do_entry(initial, dlibm_sin_cos_huge, dlibm_sin_cos_huge, \ + dlibm_sin_cos_huge) \ + do_stub(initial, dlibm_reduce_pi04l) \ + do_entry(initial, dlibm_reduce_pi04l, dlibm_reduce_pi04l, \ + dlibm_reduce_pi04l) \ + do_stub(initial, dlibm_tan_cot_huge) \ + do_entry(initial, dlibm_tan_cot_huge, dlibm_tan_cot_huge, \ + dlibm_tan_cot_huge) \ + /* merge in stubs and entries declared in arch header */ \ + STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + end_blob(initial) \ + + +#define STUBGEN_CONTINUATION_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + do_blob(continuation) \ + do_stub(continuation, cont_thaw) \ + do_entry(continuation, cont_thaw, cont_thaw, cont_thaw) \ + do_stub(continuation, cont_preempt) \ + do_entry(continuation, cont_prempt, cont_preempt_stub, \ + cont_preempt_stub) \ + do_stub(continuation, cont_returnBarrier) \ + do_entry(continuation, cont_returnBarrier, cont_returnBarrier, \ + cont_returnBarrier) \ + do_stub(continuation, cont_returnBarrierExc) \ + do_entry(continuation, cont_returnBarrierExc, cont_returnBarrierExc, \ + cont_returnBarrierExc) \ + /* merge in stubs and entries declared in arch header */ \ + STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + end_blob(continuation) \ + + +#define STUBGEN_COMPILER_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + do_blob(compiler) \ 
+ do_stub(compiler, atomic_add) \ + do_entry(compiler, atomic_add, atomic_add_entry, atomic_add_entry) \ + do_stub(compiler, array_sort) \ + do_entry(compiler, array_sort, array_sort, select_arraysort_function) \ + do_stub(compiler, array_partition) \ + do_entry(compiler, array_partition, array_partition, \ + select_array_partition_function) \ + do_stub(compiler, aescrypt_encryptBlock) \ + do_entry(compiler, aescrypt_encryptBlock, aescrypt_encryptBlock, \ + aescrypt_encryptBlock) \ + do_stub(compiler, aescrypt_decryptBlock) \ + do_entry(compiler, aescrypt_decryptBlock, aescrypt_decryptBlock, \ + aescrypt_decryptBlock) \ + do_stub(compiler, cipherBlockChaining_encryptAESCrypt) \ + do_entry(compiler, cipherBlockChaining_encryptAESCrypt, \ + cipherBlockChaining_encryptAESCrypt, \ + cipherBlockChaining_encryptAESCrypt) \ + do_stub(compiler, cipherBlockChaining_decryptAESCrypt) \ + do_entry(compiler, cipherBlockChaining_decryptAESCrypt, \ + cipherBlockChaining_decryptAESCrypt, \ + cipherBlockChaining_decryptAESCrypt) \ + do_stub(compiler, electronicCodeBook_encryptAESCrypt) \ + do_entry(compiler, electronicCodeBook_encryptAESCrypt, \ + electronicCodeBook_encryptAESCrypt, \ + electronicCodeBook_encryptAESCrypt) \ + do_stub(compiler, electronicCodeBook_decryptAESCrypt) \ + do_entry(compiler, electronicCodeBook_decryptAESCrypt, \ + electronicCodeBook_decryptAESCrypt, \ + electronicCodeBook_decryptAESCrypt) \ + do_stub(compiler, counterMode_AESCrypt) \ + do_entry(compiler, counterMode_AESCrypt, counterMode_AESCrypt, \ + counterMode_AESCrypt) \ + do_stub(compiler, galoisCounterMode_AESCrypt) \ + do_entry(compiler, galoisCounterMode_AESCrypt, \ + galoisCounterMode_AESCrypt, galoisCounterMode_AESCrypt) \ + do_stub(compiler, ghash_processBlocks) \ + do_entry(compiler, ghash_processBlocks, ghash_processBlocks, \ + ghash_processBlocks) \ + do_stub(compiler, chacha20Block) \ + do_entry(compiler, chacha20Block, chacha20Block, chacha20Block) \ + do_stub(compiler, 
data_cache_writeback) \ + do_entry(compiler, data_cache_writeback, data_cache_writeback, \ + data_cache_writeback) \ + do_stub(compiler, data_cache_writeback_sync) \ + do_entry(compiler, data_cache_writeback_sync, \ + data_cache_writeback_sync, data_cache_writeback_sync) \ + do_stub(compiler, base64_encodeBlock) \ + do_entry(compiler, base64_encodeBlock, base64_encodeBlock, \ + base64_encodeBlock) \ + do_stub(compiler, base64_decodeBlock) \ + do_entry(compiler, base64_decodeBlock, base64_decodeBlock, \ + base64_decodeBlock) \ + do_stub(compiler, poly1305_processBlocks) \ + do_entry(compiler, poly1305_processBlocks, poly1305_processBlocks, \ + poly1305_processBlocks) \ + do_stub(compiler, intpoly_montgomeryMult_P256) \ + do_entry(compiler, intpoly_montgomeryMult_P256, \ + intpoly_montgomeryMult_P256, intpoly_montgomeryMult_P256) \ + do_stub(compiler, intpoly_assign) \ + do_entry(compiler, intpoly_assign, intpoly_assign, intpoly_assign) \ + do_stub(compiler, md5_implCompress) \ + do_entry(compiler, md5_implCompress, md5_implCompress, \ + md5_implCompress) \ + do_stub(compiler, md5_implCompressMB) \ + do_entry(compiler, md5_implCompressMB, md5_implCompressMB, \ + md5_implCompressMB) \ + do_stub(compiler, sha1_implCompress) \ + do_entry(compiler, sha1_implCompress, sha1_implCompress, \ + sha1_implCompress) \ + do_stub(compiler, sha1_implCompressMB) \ + do_entry(compiler, sha1_implCompressMB, sha1_implCompressMB, \ + sha1_implCompressMB) \ + do_stub(compiler, sha256_implCompress) \ + do_entry(compiler, sha256_implCompress, sha256_implCompress, \ + sha256_implCompress) \ + do_stub(compiler, sha256_implCompressMB) \ + do_entry(compiler, sha256_implCompressMB, sha256_implCompressMB, \ + sha256_implCompressMB) \ + do_stub(compiler, sha512_implCompress) \ + do_entry(compiler, sha512_implCompress, sha512_implCompress, \ + sha512_implCompress) \ + do_stub(compiler, sha512_implCompressMB) \ + do_entry(compiler, sha512_implCompressMB, sha512_implCompressMB, \ + 
sha512_implCompressMB) \ + do_stub(compiler, sha3_implCompress) \ + do_entry(compiler, sha3_implCompress, sha3_implCompress, \ + sha3_implCompress) \ + do_stub(compiler, sha3_implCompressMB) \ + do_entry(compiler, sha3_implCompressMB, sha3_implCompressMB, \ + sha3_implCompressMB) \ + do_stub(compiler, updateBytesAdler32) \ + do_entry(compiler, updateBytesAdler32, updateBytesAdler32, \ + updateBytesAdler32) \ + do_stub(compiler, multiplyToLen) \ + do_entry(compiler, multiplyToLen, multiplyToLen, multiplyToLen) \ + do_stub(compiler, squareToLen) \ + do_entry(compiler, squareToLen, squareToLen, squareToLen) \ + do_stub(compiler, mulAdd) \ + do_entry(compiler, mulAdd, mulAdd, mulAdd) \ + do_stub(compiler, montgomeryMultiply) \ + do_entry(compiler, montgomeryMultiply, montgomeryMultiply, \ + montgomeryMultiply) \ + do_stub(compiler, montgomerySquare) \ + do_entry(compiler, montgomerySquare, montgomerySquare, \ + montgomerySquare) \ + do_stub(compiler, bigIntegerRightShiftWorker) \ + do_entry(compiler, bigIntegerRightShiftWorker, \ + bigIntegerRightShiftWorker, bigIntegerRightShift) \ + do_stub(compiler, bigIntegerLeftShiftWorker) \ + do_entry(compiler, bigIntegerLeftShiftWorker, \ + bigIntegerLeftShiftWorker, bigIntegerLeftShift) \ + /* merge in stubs and entries declared in arch header */ \ + STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + end_blob(compiler) \ + + +#define STUBGEN_FINAL_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + do_blob(final) \ + do_stub(final, verify_oop) \ + do_entry(final, verify_oop, verify_oop_subroutine_entry, \ + verify_oop_subroutine_entry) \ + do_stub(final, jbyte_arraycopy) \ + do_entry_init(final, jbyte_arraycopy, jbyte_arraycopy, \ + jbyte_arraycopy, StubRoutines::jbyte_copy) \ + do_stub(final, jshort_arraycopy) \ + do_entry_init(final, jshort_arraycopy, jshort_arraycopy, \ + 
jshort_arraycopy, StubRoutines::jshort_copy) \ + do_stub(final, jint_arraycopy) \ + do_entry_init(final, jint_arraycopy, jint_arraycopy, \ + jint_arraycopy, StubRoutines::jint_copy) \ + do_stub(final, jlong_arraycopy) \ + do_entry_init(final, jlong_arraycopy, jlong_arraycopy, \ + jlong_arraycopy, StubRoutines::jlong_copy) \ + do_stub(final, oop_arraycopy) \ + do_entry_init(final, oop_arraycopy, oop_arraycopy, \ + oop_arraycopy_entry, StubRoutines::oop_copy) \ + do_stub(final, oop_arraycopy_uninit) \ + do_entry_init(final, oop_arraycopy_uninit, oop_arraycopy_uninit, \ + oop_arraycopy_uninit_entry, \ + StubRoutines::oop_copy_uninit) \ + do_stub(final, jbyte_disjoint_arraycopy) \ + do_entry_init(final, jbyte_disjoint_arraycopy, \ + jbyte_disjoint_arraycopy, jbyte_disjoint_arraycopy, \ + StubRoutines::jbyte_copy) \ + do_stub(final, jshort_disjoint_arraycopy) \ + do_entry_init(final, jshort_disjoint_arraycopy, \ + jshort_disjoint_arraycopy, jshort_disjoint_arraycopy, \ + StubRoutines::jshort_copy) \ + do_stub(final, jint_disjoint_arraycopy) \ + do_entry_init(final, jint_disjoint_arraycopy, \ + jint_disjoint_arraycopy, jint_disjoint_arraycopy, \ + StubRoutines::jint_copy) \ + do_stub(final, jlong_disjoint_arraycopy) \ + do_entry_init(final, jlong_disjoint_arraycopy, \ + jlong_disjoint_arraycopy, jlong_disjoint_arraycopy, \ + StubRoutines::jlong_copy) \ + do_stub(final, oop_disjoint_arraycopy) \ + do_entry_init(final, oop_disjoint_arraycopy, oop_disjoint_arraycopy, \ + oop_disjoint_arraycopy_entry, StubRoutines::oop_copy) \ + do_stub(final, oop_disjoint_arraycopy_uninit) \ + do_entry_init(final, oop_disjoint_arraycopy_uninit, \ + oop_disjoint_arraycopy_uninit, \ + oop_disjoint_arraycopy_uninit_entry, \ + StubRoutines::oop_copy_uninit) \ + do_stub(final, arrayof_jbyte_arraycopy) \ + do_entry_init(final, arrayof_jbyte_arraycopy, \ + arrayof_jbyte_arraycopy, arrayof_jbyte_arraycopy, \ + StubRoutines::arrayof_jbyte_copy) \ + do_stub(final, arrayof_jshort_arraycopy) \ + 
do_entry_init(final, arrayof_jshort_arraycopy, \ + arrayof_jshort_arraycopy, arrayof_jshort_arraycopy, \ + StubRoutines::arrayof_jshort_copy) \ + do_stub(final, arrayof_jint_arraycopy) \ + do_entry_init(final, arrayof_jint_arraycopy, arrayof_jint_arraycopy, \ + arrayof_jint_arraycopy, \ + StubRoutines::arrayof_jint_copy) \ + do_stub(final, arrayof_jlong_arraycopy) \ + do_entry_init(final, arrayof_jlong_arraycopy, \ + arrayof_jlong_arraycopy, arrayof_jlong_arraycopy, \ + StubRoutines::arrayof_jlong_copy) \ + do_stub(final, arrayof_oop_arraycopy) \ + do_entry_init(final, arrayof_oop_arraycopy, arrayof_oop_arraycopy, \ + arrayof_oop_arraycopy, StubRoutines::arrayof_oop_copy) \ + do_stub(final, arrayof_oop_arraycopy_uninit) \ + do_entry_init(final, arrayof_oop_arraycopy_uninit, \ + arrayof_oop_arraycopy_uninit, \ + arrayof_oop_arraycopy_uninit, \ + StubRoutines::arrayof_oop_copy_uninit) \ + do_stub(final, arrayof_jbyte_disjoint_arraycopy) \ + do_entry_init(final, arrayof_jbyte_disjoint_arraycopy, \ + arrayof_jbyte_disjoint_arraycopy, \ + arrayof_jbyte_disjoint_arraycopy, \ + StubRoutines::arrayof_jbyte_copy) \ + do_stub(final, arrayof_jshort_disjoint_arraycopy) \ + do_entry_init(final, arrayof_jshort_disjoint_arraycopy, \ + arrayof_jshort_disjoint_arraycopy, \ + arrayof_jshort_disjoint_arraycopy, \ + StubRoutines::arrayof_jshort_copy) \ + do_stub(final, arrayof_jint_disjoint_arraycopy) \ + do_entry_init(final, arrayof_jint_disjoint_arraycopy, \ + arrayof_jint_disjoint_arraycopy, \ + arrayof_jint_disjoint_arraycopy, \ + StubRoutines::arrayof_jint_copy) \ + do_stub(final, arrayof_jlong_disjoint_arraycopy) \ + do_entry_init(final, arrayof_jlong_disjoint_arraycopy, \ + arrayof_jlong_disjoint_arraycopy, \ + arrayof_jlong_disjoint_arraycopy, \ + StubRoutines::arrayof_jlong_copy) \ + do_stub(final, arrayof_oop_disjoint_arraycopy) \ + do_entry_init(final, arrayof_oop_disjoint_arraycopy, \ + arrayof_oop_disjoint_arraycopy, \ + arrayof_oop_disjoint_arraycopy_entry, \ + 
StubRoutines::arrayof_oop_copy) \ + do_stub(final, arrayof_oop_disjoint_arraycopy_uninit) \ + do_entry_init(final, arrayof_oop_disjoint_arraycopy_uninit, \ + arrayof_oop_disjoint_arraycopy_uninit, \ + arrayof_oop_disjoint_arraycopy_uninit_entry, \ + StubRoutines::arrayof_oop_copy_uninit) \ + do_stub(final, checkcast_arraycopy) \ + do_entry(final, checkcast_arraycopy, checkcast_arraycopy, \ + checkcast_arraycopy_entry) \ + do_stub(final, checkcast_arraycopy_uninit) \ + do_entry(final, checkcast_arraycopy_uninit, \ + checkcast_arraycopy_uninit, \ + checkcast_arraycopy_uninit_entry) \ + do_stub(final, unsafe_arraycopy) \ + do_entry(final, unsafe_arraycopy, unsafe_arraycopy, unsafe_arraycopy) \ + do_stub(final, generic_arraycopy) \ + do_entry(final, generic_arraycopy, generic_arraycopy, \ + generic_arraycopy) \ + do_stub(final, unsafe_setmemory) \ + do_entry(final, unsafe_setmemory, unsafe_setmemory, unsafe_setmemory) \ + do_stub(final, jbyte_fill) \ + do_entry(final, jbyte_fill, jbyte_fill, jbyte_fill) \ + do_stub(final, jshort_fill) \ + do_entry(final, jshort_fill, jshort_fill, jshort_fill) \ + do_stub(final, jint_fill) \ + do_entry(final, jint_fill, jint_fill, jint_fill) \ + do_stub(final, arrayof_jbyte_fill) \ + do_entry(final, arrayof_jbyte_fill, arrayof_jbyte_fill, \ + arrayof_jbyte_fill) \ + do_stub(final, arrayof_jshort_fill) \ + do_entry(final, arrayof_jshort_fill, arrayof_jshort_fill, \ + arrayof_jshort_fill) \ + do_stub(final, arrayof_jint_fill) \ + do_entry(final, arrayof_jint_fill, arrayof_jint_fill, \ + arrayof_jint_fill) \ + do_stub(final, method_entry_barrier) \ + do_entry(final, method_entry_barrier, method_entry_barrier, \ + method_entry_barrier) \ + do_stub(final, vectorizedMismatch) /* only used by x86! 
*/ \ + do_entry(final, vectorizedMismatch, vectorizedMismatch, \ + vectorizedMismatch) \ + do_stub(final, upcall_stub_exception_handler) \ + do_entry(final, upcall_stub_exception_handler, \ + upcall_stub_exception_handler, \ + upcall_stub_exception_handler) \ + do_stub(final, upcall_stub_load_target) \ + do_entry(final, upcall_stub_load_target, upcall_stub_load_target, \ + upcall_stub_load_target) \ + do_stub(final, lookup_secondary_supers_table) \ + do_entry_array(final, lookup_secondary_supers_table, \ + lookup_secondary_supers_table_stubs, \ + lookup_secondary_supers_table_stub, \ + Klass::SECONDARY_SUPERS_TABLE_SIZE) \ + do_stub(final, lookup_secondary_supers_table_slow_path) \ + do_entry(final, lookup_secondary_supers_table_slow_path, \ + lookup_secondary_supers_table_slow_path_stub, \ + lookup_secondary_supers_table_slow_path_stub) \ + /* merge in stubs and entries declared in arch header */ \ + STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + end_blob(final) \ + + +// Convenience macros for use by template implementations #define STUB_ID_NAME(base) base##_id -// generate a stub field name +// emit a runtime or stubgen stub field name #define STUB_FIELD_NAME(base) _##base -// generate a blob field name +// emit a runtime blob field name #define BLOB_FIELD_NAME(base) _##base##_blob +// emit a stubgen blob field name + +#define STUBGEN_BLOB_FIELD_NAME(base) _ ## base ## _stubs_code + +// Convenience templates that emit nothing + +// ignore do_blob(blob_name, type) declarations +#define DO_BLOB_EMPTY2(blob_name, type) + +// ignore do_blob(blob_name) and end_blob(blob_name) declarations +#define DO_BLOB_EMPTY1(blob_name) + +// ignore do_stub(name, fancy_jump, pass_tls, return_pc) declarations +#define DO_STUB_EMPTY4(name, fancy_jump, pass_tls, return_pc) + +// ignore do_jvmti_stub(name) declarations +#define DO_JVMTI_STUB_EMPTY1(stub_name) + +// ignore do_stub(blob_name, stub_name) declarations +#define 
DO_STUB_EMPTY2(blob_name, stub_name) + +// ignore do_entry(blob_name, stub_name, fieldname, getter_name) declarations +#define DO_ENTRY_EMPTY4(blob_name, stub_name, fieldname, getter_name) + +// ignore do_entry(blob_name, stub_name, fieldname, getter_name, init_function) and +// do_entry_array(blob_name, stub_name, fieldname, getter_name, count) declarations +#define DO_ENTRY_EMPTY5(blob_name, stub_name, fieldname, getter_name, init_function) + +// ignore do_arch_blob(blob_name, size) declarations +#define DO_ARCH_BLOB_EMPTY2(arch, size) + +// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name) declarations +#define DO_ARCH_ENTRY_EMPTY5(arch, blob_name, stub_name, field_name, getter_name) + +// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name, init_function) declarations +#define DO_ARCH_ENTRY_EMPTY6(arch, blob_name, stub_name, field_name, getter_name, init_function) + +// The whole shebang! +// +// client macro for emitting StubGenerator blobs, stubs and entries + +#define STUBGEN_ALL_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + STUBGEN_INITIAL_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + STUBGEN_CONTINUATION_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + STUBGEN_COMPILER_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + STUBGEN_FINAL_BLOBS_DO(do_blob, end_blob, \ + do_stub, \ + do_entry, do_entry_init, \ + do_entry_array, \ + do_arch_blob, \ + do_arch_entry, do_arch_entry_init) \ + +// client macro to operate only on StubGenerator blobs + +#define STUBGEN_BLOBS_DO(do_blob) \ + STUBGEN_ALL_DO(do_blob, DO_BLOB_EMPTY1, 
\ + DO_STUB_EMPTY2, \ + DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ + DO_ENTRY_EMPTY5, \ + DO_ARCH_BLOB_EMPTY2, \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + +// client macro to operate only on StubGenerator stubs + +#define STUBGEN_STUBS_DO(do_stub) \ + STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ + do_stub, \ + DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ + DO_ENTRY_EMPTY5, \ + DO_ARCH_BLOB_EMPTY2, \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + +// client macro to operate only on StubGenerator blobs and stubs + +#define STUBGEN_BLOBS_STUBS_DO(do_blob, end_blob, do_stub) \ + STUBGEN_ALL_DO(do_blob, end_blob, \ + do_stub, \ + DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ + DO_ENTRY_EMPTY5, \ + DO_ARCH_BLOB_EMPTY2, \ + DO_ARCH_ENTRY_EMPTY5,DO_ARCH_ENTRY_EMPTY6) \ + +// client macro to operate only on StubGenerator entries + +#define STUBGEN_ENTRIES_DO(do_entry, do_entry_init, do_entry_array) \ + STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ + DO_STUB_EMPTY2, \ + do_entry, do_entry_init, \ + do_entry_array, \ + DO_ARCH_BLOB_EMPTY2, \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + + +// client macro to operate only on StubGenerator arch blobs + +#define STUBGEN_ARCH_BLOBS_DO(do_arch_blob) \ + STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ + DO_STUB_EMPTY2, \ + DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ + DO_ENTRY_EMPTY5, \ + do_arch_blob, \ + DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6) \ + +// client macro to operate only on StubGenerator arch entries + +#define STUBGEN_ARCH_ENTRIES_DO(do_arch_entry, do_arch_entry_init) \ + STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1, \ + DO_STUB_EMPTY2, \ + DO_ENTRY_EMPTY4, DO_ENTRY_EMPTY5, \ + DO_ENTRY_EMPTY5, \ + DO_ARCH_BLOB_EMPTY2, \ + do_arch_entry, do_arch_entry_init) \ + #endif // SHARE_RUNTIME_STUBDECLARATIONS_HPP diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 2c0a3baff4bd0..dd1da2c9e668a 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ 
b/src/hotspot/share/runtime/stubRoutines.cpp @@ -45,154 +45,141 @@ int UnsafeMemoryAccess::_table_length = 0; int UnsafeMemoryAccess::_table_max_length = 0; address UnsafeMemoryAccess::_common_exit_stub_pc = nullptr; -// Implementation of StubRoutines - for a description -// of how to extend it, see the header file. +// Implementation of StubRoutines - for a description of how to +// declare new blobs, stubs and entries, see stubDeclarations.hpp. -// Class Variables +// define arrays to hold stub and blob names -BufferBlob* StubRoutines::_initial_stubs_code = nullptr; -BufferBlob* StubRoutines::_final_stubs_code = nullptr; -BufferBlob* StubRoutines::_compiler_stubs_code = nullptr; -BufferBlob* StubRoutines::_continuation_stubs_code = nullptr; +// use a template to generate the initializer for the blob names array -address StubRoutines::_call_stub_return_address = nullptr; -address StubRoutines::_call_stub_entry = nullptr; +#define DEFINE_BLOB_NAME(blob_name) \ + # blob_name, -address StubRoutines::_catch_exception_entry = nullptr; -address StubRoutines::_forward_exception_entry = nullptr; -jint StubRoutines::_verify_oop_count = 0; -address StubRoutines::_verify_oop_subroutine_entry = nullptr; -address StubRoutines::_atomic_xchg_entry = nullptr; -address StubRoutines::_atomic_cmpxchg_entry = nullptr; -address StubRoutines::_atomic_cmpxchg_long_entry = nullptr; -address StubRoutines::_atomic_add_entry = nullptr; -address StubRoutines::_fence_entry = nullptr; - -// Compiled code entry points default values -// The default functions don't have separate disjoint versions. 
-address StubRoutines::_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); -address StubRoutines::_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); -address StubRoutines::_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); -address StubRoutines::_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); -address StubRoutines::_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); -address StubRoutines::_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); -address StubRoutines::_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); -address StubRoutines::_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); -address StubRoutines::_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); -address StubRoutines::_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); -address StubRoutines::_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); -address StubRoutines::_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); - -address StubRoutines::_arrayof_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); -address StubRoutines::_arrayof_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); -address StubRoutines::_arrayof_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); -address StubRoutines::_arrayof_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); -address StubRoutines::_arrayof_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); -address StubRoutines::_arrayof_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); -address StubRoutines::_arrayof_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); 
-address StubRoutines::_arrayof_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); -address StubRoutines::_arrayof_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); -address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); -address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); -address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); - -address StubRoutines::_data_cache_writeback = nullptr; -address StubRoutines::_data_cache_writeback_sync = nullptr; - -address StubRoutines::_checkcast_arraycopy = nullptr; -address StubRoutines::_checkcast_arraycopy_uninit = nullptr; -address StubRoutines::_unsafe_arraycopy = nullptr; -address StubRoutines::_generic_arraycopy = nullptr; - -address StubRoutines::_unsafe_setmemory = nullptr; - -address StubRoutines::_jbyte_fill; -address StubRoutines::_jshort_fill; -address StubRoutines::_jint_fill; -address StubRoutines::_arrayof_jbyte_fill; -address StubRoutines::_arrayof_jshort_fill; -address StubRoutines::_arrayof_jint_fill; - -address StubRoutines::_aescrypt_encryptBlock = nullptr; -address StubRoutines::_aescrypt_decryptBlock = nullptr; -address StubRoutines::_cipherBlockChaining_encryptAESCrypt = nullptr; -address StubRoutines::_cipherBlockChaining_decryptAESCrypt = nullptr; -address StubRoutines::_electronicCodeBook_encryptAESCrypt = nullptr; -address StubRoutines::_electronicCodeBook_decryptAESCrypt = nullptr; -address StubRoutines::_counterMode_AESCrypt = nullptr; -address StubRoutines::_galoisCounterMode_AESCrypt = nullptr; -address StubRoutines::_ghash_processBlocks = nullptr; -address StubRoutines::_chacha20Block = nullptr; -address StubRoutines::_base64_encodeBlock = nullptr; -address StubRoutines::_base64_decodeBlock = nullptr; -address 
StubRoutines::_poly1305_processBlocks = nullptr; -address StubRoutines::_intpoly_montgomeryMult_P256 = nullptr; -address StubRoutines::_intpoly_assign = nullptr; - -address StubRoutines::_md5_implCompress = nullptr; -address StubRoutines::_md5_implCompressMB = nullptr; -address StubRoutines::_sha1_implCompress = nullptr; -address StubRoutines::_sha1_implCompressMB = nullptr; -address StubRoutines::_sha256_implCompress = nullptr; -address StubRoutines::_sha256_implCompressMB = nullptr; -address StubRoutines::_sha512_implCompress = nullptr; -address StubRoutines::_sha512_implCompressMB = nullptr; -address StubRoutines::_sha3_implCompress = nullptr; -address StubRoutines::_sha3_implCompressMB = nullptr; - -address StubRoutines::_updateBytesCRC32 = nullptr; -address StubRoutines::_crc_table_adr = nullptr; +const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = { + STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME) +}; -address StubRoutines::_string_indexof_array[4] = { nullptr }; +#undef DEFINE_BLOB_NAME + +#define DEFINE_STUB_NAME(blob_name, stub_name) \ + # stub_name , \ + +// use a template to generate the initializer for the stub names array +const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = { + STUBGEN_STUBS_DO(DEFINE_STUB_NAME) +}; + +#undef DEFINE_STUB_NAME + +// Define fields used to store blobs + +#define DEFINE_BLOB_FIELD(blob_name) \ + BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr; + +STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD) + +#undef DEFINE_BLOB_FIELD -address StubRoutines::_crc32c_table_addr = nullptr; -address StubRoutines::_updateBytesCRC32C = nullptr; -address StubRoutines::_updateBytesAdler32 = nullptr; - -address StubRoutines::_multiplyToLen = nullptr; -address StubRoutines::_squareToLen = nullptr; -address StubRoutines::_mulAdd = nullptr; -address StubRoutines::_montgomeryMultiply = nullptr; -address StubRoutines::_montgomerySquare = nullptr; -address StubRoutines::_bigIntegerRightShiftWorker = nullptr; -address 
StubRoutines::_bigIntegerLeftShiftWorker = nullptr; - -address StubRoutines::_vectorizedMismatch = nullptr; - -address StubRoutines::_dexp = nullptr; -address StubRoutines::_dlog = nullptr; -address StubRoutines::_dlog10 = nullptr; -address StubRoutines::_fmod = nullptr; -address StubRoutines::_dpow = nullptr; -address StubRoutines::_dsin = nullptr; -address StubRoutines::_dcos = nullptr; -address StubRoutines::_dlibm_sin_cos_huge = nullptr; -address StubRoutines::_dlibm_reduce_pi04l = nullptr; -address StubRoutines::_dlibm_tan_cot_huge = nullptr; -address StubRoutines::_dtan = nullptr; -address StubRoutines::_dtanh = nullptr; - -address StubRoutines::_f2hf = nullptr; -address StubRoutines::_hf2f = nullptr; +// Define fields used to store stub entries +#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \ + address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr; + +#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \ + address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function); + +#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \ + address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr }; + +STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY) + +#undef DEFINE_ENTRY_FIELD_ARRAY +#undef DEFINE_ENTRY_FIELD_INIT +#undef DEFINE_ENTRY_FIELD + +jint StubRoutines::_verify_oop_count = 0; + +address StubRoutines::_string_indexof_array[4] = { nullptr }; address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}}; address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}}; -address StubRoutines::_method_entry_barrier = nullptr; -address StubRoutines::_array_sort = nullptr; -address StubRoutines::_array_partition = nullptr; +const char* 
StubRoutines::get_blob_name(StubGenBlobId id) { + assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id"); + return _blob_names[id]; +} + +const char* StubRoutines::get_stub_name(StubGenStubId id) { + assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id"); + return _stub_names[id]; +} + +#ifdef ASSERT + +// array holding start and end indices for stub ids associated with a +// given blob. Given a blob with id (StubGenBlobId) blob_id for any +// stub with id (StubGenStubId) stub_id declared within the blob: +// _blob_limits[blob_id] <= stub_id < _blob_limits[blob_id+1] + +static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1]; + +// macro used to compute blob limits +#define BLOB_COUNT(blob_name) \ + counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name; \ + _blob_limits[++index] = counter; \ + +// macro that checks stubs are associated with the correct blobs +#define STUB_VERIFY(blob_name, stub_name) \ + localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \ + globalStubId = (int) (StubGenStubId:: stub_name ## _id); \ + blobId = (int) (StubGenBlobId:: blob_name ## _id); \ + assert((globalStubId >= _blob_limits[blobId] && \ + globalStubId < _blob_limits[blobId+1]), \ + "stub " # stub_name " uses incorrect blob name " # blob_name); \ + assert(globalStubId == _blob_limits[blobId] + localStubId, \ + "stub " # stub_name " id found at wrong offset!"); \ + +bool verifyStubIds() { + // first compute the blob limits + int counter = 0; + int index = 0; + // populate limits table with cumulative total of local enum counts + STUBGEN_BLOBS_DO(BLOB_COUNT); + + // ensure 1) global stub ids lie in the range of the associated blob + // and 2) each blob's base + local stub id == global stub id + int globalStubId, blobId, localStubId; + STUBGEN_STUBS_DO(STUB_VERIFY); + return true; +} -address StubRoutines::_cont_thaw = nullptr; -address StubRoutines::_cont_returnBarrier = nullptr; -address 
StubRoutines::_cont_returnBarrierExc = nullptr; -address StubRoutines::_cont_preempt_stub = nullptr; +#undef BLOB_COUNT +#undef STUB_VERIFY -address StubRoutines::_upcall_stub_exception_handler = nullptr; -address StubRoutines::_upcall_stub_load_target = nullptr; +// ensure we verify the blob ids when this compile unit is first entered +bool _verified_stub_ids = verifyStubIds(); -address StubRoutines::_lookup_secondary_supers_table_slow_path_stub = nullptr; -address StubRoutines::_lookup_secondary_supers_table_stubs[Klass::SECONDARY_SUPERS_TABLE_SIZE] = { nullptr }; +// macro used by stub to blob translation + +#define BLOB_CHECK_OFFSET(blob_name) \ + if (id < _blob_limits[((int)blobId) + 1]) { return blobId; } \ + blobId = StubGenBlobId:: blob_name ## _id; \ + +// translate a global stub id to an associated blob id based on the +// computed blob limits + +StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) { + int id = (int)stubId; + assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!"); + // start with no blob to catch stub id == -1 + StubGenBlobId blobId = StubGenBlobId::NO_BLOBID; + STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET); + // if we reach here we should have the last blob id + assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id"); + return blobId; +} + +#endif // ASSERT // Initialization // @@ -200,7 +187,7 @@ address StubRoutines::_lookup_secondary_supers_table_stubs[Klass::SECONDARY_SUPE // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry). // The second phase includes all other stubs (which may depend on universe being initialized.) 
-extern void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind); // only interface to generators +extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators void UnsafeMemoryAccess::create_table(int max_size) { UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size]; @@ -228,7 +215,7 @@ address UnsafeMemoryAccess::page_error_continue_pc(address pc) { } -static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind, +static BufferBlob* initialize_stubs(StubGenBlobId blob_id, int code_size, int max_aligned_stubs, const char* timer_msg, const char* buffer_name, @@ -242,7 +229,7 @@ static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind, vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name); } CodeBuffer buffer(stubs_code); - StubGenerator_generate(&buffer, kind); + StubGenerator_generate(&buffer, blob_id); // When new stubs added we need to make sure there is some space left // to catch situation when we should increase size again. 
assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg); @@ -257,49 +244,42 @@ static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind, return stubs_code; } -void StubRoutines::initialize_initial_stubs() { - if (_initial_stubs_code == nullptr) { - _initial_stubs_code = initialize_stubs(StubCodeGenerator::Initial_stubs, - _initial_stubs_code_size, 10, - "StubRoutines generation initial stubs", - "StubRoutines (initial stubs)", - "_initial_stubs_code_size"); +#define DEFINE_BLOB_INIT_METHOD(blob_name) \ + void StubRoutines::initialize_ ## blob_name ## _stubs() { \ + if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) { \ + StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name); \ + int size = _ ## blob_name ## _code_size; \ + int max_aligned_size = 10; \ + const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \ + const char* name = "StubRoutines (" # blob_name " stubs)"; \ + const char* assert_msg = "_" # blob_name "_code_size"; \ + STUBGEN_BLOB_FIELD_NAME(blob_name) = \ + initialize_stubs(blob_id, size, max_aligned_size, timer_msg, \ + name, assert_msg); \ + } \ } -} -void StubRoutines::initialize_continuation_stubs() { - if (_continuation_stubs_code == nullptr) { - _continuation_stubs_code = initialize_stubs(StubCodeGenerator::Continuation_stubs, - _continuation_stubs_code_size, 10, - "StubRoutines generation continuation stubs", - "StubRoutines (continuation stubs)", - "_continuation_stubs_code_size"); - } -} -void StubRoutines::initialize_compiler_stubs() { - if (_compiler_stubs_code == nullptr) { - _compiler_stubs_code = initialize_stubs(StubCodeGenerator::Compiler_stubs, - _compiler_stubs_code_size, 100, - "StubRoutines generation compiler stubs", - "StubRoutines (compiler stubs)", - "_compiler_stubs_code_size"); - } -} +STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD) -void StubRoutines::initialize_final_stubs() { - if (_final_stubs_code == nullptr) { - _final_stubs_code = 
initialize_stubs(StubCodeGenerator::Final_stubs, - _final_stubs_code_size, 10, - "StubRoutines generation final stubs", - "StubRoutines (final stubs)", - "_final_stubs_code_size"); - } +#undef DEFINE_BLOB_INIT_METHOD + + +#define DEFINE_BLOB_INIT_FUNCTION(blob_name) \ +void blob_name ## _stubs_init() { \ + StubRoutines::initialize_ ## blob_name ## _stubs(); \ } -void initial_stubs_init() { StubRoutines::initialize_initial_stubs(); } -void continuation_stubs_init() { StubRoutines::initialize_continuation_stubs(); } -void final_stubs_init() { StubRoutines::initialize_final_stubs(); } +STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION) + +#undef DEFINE_BLOB_INIT_FUNCTION + +/* + * we generate the underlying driver method but this wrapper is needed + * to perform special handling depending on where the compiler init + * gets called from. it ought to be possible to remove this at some + * point and have a determinate ordered init. + */ void compiler_stubs_init(bool in_compiler_thread) { if (in_compiler_thread && DelayCompilerStubsGeneration) { @@ -316,6 +296,7 @@ void compiler_stubs_init(bool in_compiler_thread) { } } + // // Default versions of arraycopy functions // diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 4c96738ce4b1c..3189415a6c5d6 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "runtime/frame.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/stubCodeGenerator.hpp" +#include "runtime/stubDeclarations.hpp" #include "runtime/threadWXSetters.inline.hpp" #include "utilities/macros.hpp" @@ -150,189 +151,134 @@ class UnsafeMemoryAccessMark : public StackObj { ~UnsafeMemoryAccessMark(); }; +// declare stubgen blob id enum + +#define BLOB_ENUM_DECLARE(blob_name) \ + STUB_ID_NAME(blob_name), + +enum StubGenBlobId : int { + NO_BLOBID = -1, + STUBGEN_BLOBS_DO(BLOB_ENUM_DECLARE) + NUM_BLOBIDS +}; + +#undef BLOB_ENUM_DECLARE + +// declare blob local stub id enums + +#define BLOB_LOCAL_ENUM_START(blob_name) \ + enum StubGenStubId_ ## blob_name { \ + NO_STUBID_ ## blob_name = -1, + +#define BLOB_LOCAL_ENUM_END(blob_name) \ + NUM_STUBIDS_ ## blob_name \ + }; + +#define BLOB_LOCAL_STUB_ENUM_DECLARE(blob_name, stub_name) \ + blob_name ## _ ## stub_name ## _id, + +STUBGEN_BLOBS_STUBS_DO(BLOB_LOCAL_ENUM_START, BLOB_LOCAL_ENUM_END, BLOB_LOCAL_STUB_ENUM_DECLARE) + +#undef BLOB_LOCAL_ENUM_START +#undef BLOB_LOCAL_ENUM_END +#undef BLOB_LOCAL_STUB_ENUM_DECLARE + +// declare global stub id enum + +#define STUB_ENUM_DECLARE(blob_name, stub_name) \ + STUB_ID_NAME(stub_name) , + +enum StubGenStubId : int { + NO_STUBID = -1, + STUBGEN_STUBS_DO(STUB_ENUM_DECLARE) + NUM_STUBIDS +}; + +#undef STUB_ENUM_DECLARE + class StubRoutines: AllStatic { - public: +public: // Dependencies friend class StubGenerator; + friend class VMStructs; +#if INCLUDE_JVMCI + friend class JVMCIVMStructs; +#endif #include CPU_HEADER(stubRoutines) - static jint _verify_oop_count; - static address _verify_oop_subroutine_entry; - - static address _call_stub_return_address; // the return PC, when returning to a call stub - static address _call_stub_entry; - static address _forward_exception_entry; - static address _catch_exception_entry; - - static address _atomic_xchg_entry; - static address 
_atomic_cmpxchg_entry; - static address _atomic_cmpxchg_long_entry; - static address _atomic_add_entry; - static address _fence_entry; - - static BufferBlob* _initial_stubs_code; // code buffer for initial routines - static BufferBlob* _continuation_stubs_code; // code buffer for continuation stubs - static BufferBlob* _compiler_stubs_code; // code buffer for C2 intrinsics - static BufferBlob* _final_stubs_code; // code buffer for all other routines - - static address _array_sort; - static address _array_partition; - // Leaf routines which implement arraycopy and their addresses - // arraycopy operands aligned on element type boundary - static address _jbyte_arraycopy; - static address _jshort_arraycopy; - static address _jint_arraycopy; - static address _jlong_arraycopy; - static address _oop_arraycopy, _oop_arraycopy_uninit; - static address _jbyte_disjoint_arraycopy; - static address _jshort_disjoint_arraycopy; - static address _jint_disjoint_arraycopy; - static address _jlong_disjoint_arraycopy; - static address _oop_disjoint_arraycopy, _oop_disjoint_arraycopy_uninit; - - // arraycopy operands aligned on zero'th element boundary - // These are identical to the ones aligned aligned on an - // element type boundary, except that they assume that both - // source and destination are HeapWord aligned. 
- static address _arrayof_jbyte_arraycopy; - static address _arrayof_jshort_arraycopy; - static address _arrayof_jint_arraycopy; - static address _arrayof_jlong_arraycopy; - static address _arrayof_oop_arraycopy, _arrayof_oop_arraycopy_uninit; - static address _arrayof_jbyte_disjoint_arraycopy; - static address _arrayof_jshort_disjoint_arraycopy; - static address _arrayof_jint_disjoint_arraycopy; - static address _arrayof_jlong_disjoint_arraycopy; - static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit; - - // cache line writeback - static address _data_cache_writeback; - static address _data_cache_writeback_sync; - - // these are recommended but optional: - static address _checkcast_arraycopy, _checkcast_arraycopy_uninit; - static address _unsafe_arraycopy; - static address _generic_arraycopy; - - static address _unsafe_setmemory; - - static address _jbyte_fill; - static address _jshort_fill; - static address _jint_fill; - static address _arrayof_jbyte_fill; - static address _arrayof_jshort_fill; - static address _arrayof_jint_fill; - - static address _aescrypt_encryptBlock; - static address _aescrypt_decryptBlock; - static address _cipherBlockChaining_encryptAESCrypt; - static address _cipherBlockChaining_decryptAESCrypt; - static address _electronicCodeBook_encryptAESCrypt; - static address _electronicCodeBook_decryptAESCrypt; - static address _counterMode_AESCrypt; - static address _galoisCounterMode_AESCrypt; - static address _ghash_processBlocks; - static address _chacha20Block; - static address _base64_encodeBlock; - static address _base64_decodeBlock; - static address _poly1305_processBlocks; - static address _intpoly_montgomeryMult_P256; - static address _intpoly_assign; - - static address _md5_implCompress; - static address _md5_implCompressMB; - static address _sha1_implCompress; - static address _sha1_implCompressMB; - static address _sha256_implCompress; - static address _sha256_implCompressMB; - static address 
_sha512_implCompress; - static address _sha512_implCompressMB; - static address _sha3_implCompress; - static address _sha3_implCompressMB; - - static address _updateBytesCRC32; - static address _crc_table_adr; +// declare blob and stub name storage and associated lookup methods - static address _string_indexof_array[4]; +private: + static bool _inited_names; + static const char* _blob_names[StubGenBlobId::NUM_BLOBIDS]; + static const char* _stub_names[StubGenStubId::NUM_STUBIDS]; - static address _crc32c_table_addr; - static address _updateBytesCRC32C; - static address _updateBytesAdler32; - - static address _multiplyToLen; - static address _squareToLen; - static address _mulAdd; - static address _montgomeryMultiply; - static address _montgomerySquare; - static address _bigIntegerRightShiftWorker; - static address _bigIntegerLeftShiftWorker; - - static address _vectorizedMismatch; - - static address _dexp; - static address _dlog; - static address _dlog10; - static address _dpow; - static address _dsin; - static address _dcos; - static address _dlibm_sin_cos_huge; - static address _dlibm_reduce_pi04l; - static address _dlibm_tan_cot_huge; - static address _dtan; - static address _dtanh; - static address _fmod; - - static address _f2hf; - static address _hf2f; - - static address _method_entry_barrier; - - static address _cont_thaw; - static address _cont_returnBarrier; - static address _cont_returnBarrierExc; - static address _cont_preempt_stub; +public: + static bool init_names(); + static const char* get_blob_name(StubGenBlobId id); + static const char* get_stub_name(StubGenStubId id); - // Vector Math Routines - static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH]; - static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH]; +// declare blob fields - static address _upcall_stub_exception_handler; - static address _upcall_stub_load_target; +#define DECLARE_BLOB_FIELD(blob_name) \ + 
static BufferBlob* STUBGEN_BLOB_FIELD_NAME(blob_name); - static address _lookup_secondary_supers_table_stubs[]; - static address _lookup_secondary_supers_table_slow_path_stub; +private: + STUBGEN_BLOBS_DO(DECLARE_BLOB_FIELD); - public: - // Initialization/Testing - static void initialize_initial_stubs(); // must happen before universe::genesis - static void initialize_continuation_stubs(); // must happen after universe::genesis - static void initialize_compiler_stubs(); // must happen after universe::genesis - static void initialize_final_stubs(); // must happen after universe::genesis +#undef DECLARE_BLOB_FIELD - static bool is_stub_code(address addr) { return contains(addr); } +// declare fields to store entry addresses - static bool contains(address addr) { - return - (_initial_stubs_code != nullptr && _initial_stubs_code->blob_contains(addr)) || - (_continuation_stubs_code != nullptr && _continuation_stubs_code->blob_contains(addr)) || - (_compiler_stubs_code != nullptr && _compiler_stubs_code->blob_contains(addr)) || - (_final_stubs_code != nullptr && _final_stubs_code->blob_contains(addr)) ; - } +#define DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \ + static address STUB_FIELD_NAME(field_name); - static RuntimeBlob* initial_stubs_code() { return _initial_stubs_code; } - static RuntimeBlob* continuation_stubs_code() { return _continuation_stubs_code; } - static RuntimeBlob* compiler_stubs_code() { return _compiler_stubs_code; } - static RuntimeBlob* final_stubs_code() { return _final_stubs_code; } +#define DECLARE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \ + DECLARE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) - // Debugging - static jint verify_oop_count() { return _verify_oop_count; } - static jint* verify_oop_count_addr() { return &_verify_oop_count; } - // a subroutine for debugging the GC - static address verify_oop_subroutine_entry_address() { return 
(address)&_verify_oop_subroutine_entry; } +#define DECLARE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \ + static address STUB_FIELD_NAME(field_name)[count]; + +private: + STUBGEN_ENTRIES_DO(DECLARE_ENTRY_FIELD, DECLARE_ENTRY_FIELD_INIT, DECLARE_ENTRY_FIELD_ARRAY); + +#undef DECLARE_ENTRY_FIELD_ARRAY +#undef DECLARE_ENTRY_FIELD_INIT +#undef DECLARE_ENTRY_FIELD + +// declare getters and setters for entry addresses + +#define DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) \ + static address getter_name() { return STUB_FIELD_NAME(field_name); } \ + +#define DEFINE_ENTRY_GETTER_INIT(blob_name, stub_name, field_name, getter_name, init_function) \ + DEFINE_ENTRY_GETTER(blob_name, stub_name, field_name, getter_name) + +#define DEFINE_ENTRY_GETTER_ARRAY(blob_name, stub_name, field_name, getter_name, count) \ + static address getter_name(int idx) { \ + assert(idx < count, "out of bounds"); \ + return STUB_FIELD_NAME(field_name)[idx]; \ + } \ + +public: + STUBGEN_ENTRIES_DO(DEFINE_ENTRY_GETTER, DEFINE_ENTRY_GETTER_INIT, DEFINE_ENTRY_GETTER_ARRAY); + +#undef DEFINE_ENTRY_GETTER_ARRAY +#undef DEFINE_ENTRY_GETTER_INIT +#undef DEFINE_ENTRY_GETTER + +public: + +#define DECLARE_BLOB_INIT_METHOD(blob_name) \ + static void initialize_ ## blob_name ## _stubs(); + + STUBGEN_BLOBS_DO(DECLARE_BLOB_INIT_METHOD) + +#undef DECLARE_BLOB_INIT_METHOD - static address catch_exception_entry() { return _catch_exception_entry; } +public: // Calls to Java typedef void (*CallStub)( @@ -346,51 +292,76 @@ class StubRoutines: AllStatic { TRAPS ); - static CallStub call_stub() { return CAST_TO_FN_PTR(CallStub, _call_stub_entry); } + static jint _verify_oop_count; + +public: + // this is used by x86_64 to expose string index stubs to the opto + // library as a target to a call planted before back end lowering. + // all other arches plant the call to the stub during back end + // lowering and use arch-specific entries. 
we really need to + // rationalise this at some point. + + static address _string_indexof_array[4]; + + /* special case: stub employs array of entries */ + + // Vector Math Routines + static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH]; + static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH]; + + static bool is_stub_code(address addr) { return contains(addr); } + + // generate code to implement method contains + +#define CHECK_ADDRESS_IN_BLOB(blob_name) \ + blob = STUBGEN_BLOB_FIELD_NAME(blob_name); \ + if (blob != nullptr && blob->blob_contains(addr)) { return true; } + + static bool contains(address addr) { + BufferBlob *blob; + STUBGEN_BLOBS_DO(CHECK_ADDRESS_IN_BLOB) + return false; + } +#undef CHECK_ADDRESS_IN_BLOB +// define getters for stub code blobs - // Exceptions - static address forward_exception_entry() { return _forward_exception_entry; } +#define DEFINE_BLOB_GETTER(blob_name) \ + static RuntimeBlob* blob_name ## _stubs_code() { return _ ## blob_name ## _stubs_code; } - static address atomic_xchg_entry() { return _atomic_xchg_entry; } - static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; } - static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; } - static address atomic_add_entry() { return _atomic_add_entry; } - static address fence_entry() { return _fence_entry; } + STUBGEN_BLOBS_DO(DEFINE_BLOB_GETTER); + +#undef DEFINE_BLOB_GETTER + +#ifdef ASSERT + // provide a translation from stub id to its associated blob id + static StubGenBlobId stub_to_blob(StubGenStubId stubId); +#endif + + // Debugging + static jint verify_oop_count() { return _verify_oop_count; } + static jint* verify_oop_count_addr() { return &_verify_oop_count; } + // a subroutine for debugging the GC + static address verify_oop_subroutine_entry_address() { return (address)&_verify_oop_subroutine_entry; } + + static CallStub call_stub() { return 
CAST_TO_FN_PTR(CallStub, _call_stub_entry); } static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized); - static address jbyte_arraycopy() { return _jbyte_arraycopy; } - static address jshort_arraycopy() { return _jshort_arraycopy; } - static address jint_arraycopy() { return _jint_arraycopy; } - static address jlong_arraycopy() { return _jlong_arraycopy; } static address oop_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy; } - static address jbyte_disjoint_arraycopy() { return _jbyte_disjoint_arraycopy; } - static address jshort_disjoint_arraycopy() { return _jshort_disjoint_arraycopy; } - static address jint_disjoint_arraycopy() { return _jint_disjoint_arraycopy; } - static address jlong_disjoint_arraycopy() { return _jlong_disjoint_arraycopy; } + static address oop_disjoint_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy; } - static address arrayof_jbyte_arraycopy() { return _arrayof_jbyte_arraycopy; } - static address arrayof_jshort_arraycopy() { return _arrayof_jshort_arraycopy; } - static address arrayof_jint_arraycopy() { return _arrayof_jint_arraycopy; } - static address arrayof_jlong_arraycopy() { return _arrayof_jlong_arraycopy; } static address arrayof_oop_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? 
_arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy; } - static address arrayof_jbyte_disjoint_arraycopy() { return _arrayof_jbyte_disjoint_arraycopy; } - static address arrayof_jshort_disjoint_arraycopy() { return _arrayof_jshort_disjoint_arraycopy; } - static address arrayof_jint_disjoint_arraycopy() { return _arrayof_jint_disjoint_arraycopy; } - static address arrayof_jlong_disjoint_arraycopy() { return _arrayof_jlong_disjoint_arraycopy; } static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy; } - static address data_cache_writeback() { return _data_cache_writeback; } - static address data_cache_writeback_sync() { return _data_cache_writeback_sync; } typedef void (*DataCacheWritebackStub)(void *); static DataCacheWritebackStub DataCacheWriteback_stub() { return CAST_TO_FN_PTR(DataCacheWritebackStub, _data_cache_writeback); } @@ -400,90 +371,13 @@ class StubRoutines: AllStatic { static address checkcast_arraycopy(bool dest_uninitialized = false) { return dest_uninitialized ? 
_checkcast_arraycopy_uninit : _checkcast_arraycopy; } - static address unsafe_arraycopy() { return _unsafe_arraycopy; } typedef void (*UnsafeArrayCopyStub)(const void* src, void* dst, size_t count); static UnsafeArrayCopyStub UnsafeArrayCopy_stub() { return CAST_TO_FN_PTR(UnsafeArrayCopyStub, _unsafe_arraycopy); } - static address unsafe_setmemory() { return _unsafe_setmemory; } - typedef void (*UnsafeSetMemoryStub)(void* dst, size_t count, char byte); static UnsafeSetMemoryStub UnsafeSetMemory_stub() { return CAST_TO_FN_PTR(UnsafeSetMemoryStub, _unsafe_setmemory); } - static address generic_arraycopy() { return _generic_arraycopy; } - static address select_arraysort_function() { return _array_sort; } - static address select_array_partition_function() { return _array_partition; } - - static address jbyte_fill() { return _jbyte_fill; } - static address jshort_fill() { return _jshort_fill; } - static address jint_fill() { return _jint_fill; } - static address arrayof_jbyte_fill() { return _arrayof_jbyte_fill; } - static address arrayof_jshort_fill() { return _arrayof_jshort_fill; } - static address arrayof_jint_fill() { return _arrayof_jint_fill; } - - static address aescrypt_encryptBlock() { return _aescrypt_encryptBlock; } - static address aescrypt_decryptBlock() { return _aescrypt_decryptBlock; } - static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; } - static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; } - static address electronicCodeBook_encryptAESCrypt() { return _electronicCodeBook_encryptAESCrypt; } - static address electronicCodeBook_decryptAESCrypt() { return _electronicCodeBook_decryptAESCrypt; } - static address poly1305_processBlocks() { return _poly1305_processBlocks; } - static address intpoly_montgomeryMult_P256() { return _intpoly_montgomeryMult_P256; } - static address intpoly_assign() { return _intpoly_assign; } - static address counterMode_AESCrypt() 
{ return _counterMode_AESCrypt; } - static address ghash_processBlocks() { return _ghash_processBlocks; } - static address chacha20Block() { return _chacha20Block; } - static address base64_encodeBlock() { return _base64_encodeBlock; } - static address base64_decodeBlock() { return _base64_decodeBlock; } - static address md5_implCompress() { return _md5_implCompress; } - static address md5_implCompressMB() { return _md5_implCompressMB; } - static address sha1_implCompress() { return _sha1_implCompress; } - static address sha1_implCompressMB() { return _sha1_implCompressMB; } - static address sha256_implCompress() { return _sha256_implCompress; } - static address sha256_implCompressMB() { return _sha256_implCompressMB; } - static address sha512_implCompress() { return _sha512_implCompress; } - static address sha512_implCompressMB() { return _sha512_implCompressMB; } - static address sha3_implCompress() { return _sha3_implCompress; } - static address sha3_implCompressMB() { return _sha3_implCompressMB; } - - static address updateBytesCRC32() { return _updateBytesCRC32; } - static address crc_table_addr() { return _crc_table_adr; } - - static address crc32c_table_addr() { return _crc32c_table_addr; } - static address updateBytesCRC32C() { return _updateBytesCRC32C; } - static address updateBytesAdler32() { return _updateBytesAdler32; } - - static address multiplyToLen() { return _multiplyToLen; } - static address squareToLen() { return _squareToLen; } - static address mulAdd() { return _mulAdd; } - static address montgomeryMultiply() { return _montgomeryMultiply; } - static address montgomerySquare() { return _montgomerySquare; } - static address bigIntegerRightShift() { return _bigIntegerRightShiftWorker; } - static address bigIntegerLeftShift() { return _bigIntegerLeftShiftWorker; } - static address galoisCounterMode_AESCrypt() { return _galoisCounterMode_AESCrypt; } - - static address vectorizedMismatch() { return _vectorizedMismatch; } - - static address dexp() { 
return _dexp; } - static address dlog() { return _dlog; } - static address dlog10() { return _dlog10; } - static address dpow() { return _dpow; } - static address fmod() { return _fmod; } - static address dsin() { return _dsin; } - static address dcos() { return _dcos; } - static address dlibm_reduce_pi04l() { return _dlibm_reduce_pi04l; } - static address dlibm_sin_cos_huge() { return _dlibm_sin_cos_huge; } - static address dlibm_tan_cot_huge() { return _dlibm_tan_cot_huge; } - static address dtan() { return _dtan; } - static address dtanh() { return _dtanh; } - - // These are versions of the java.lang.Float::floatToFloat16() and float16ToFloat() - // methods which perform the same operations as the intrinsic version. - // They are used for constant folding in JIT compiler to ensure equivalence. - // - static address f2hf_adr() { return _f2hf; } - static address hf2f_adr() { return _hf2f; } - static jshort f2hf(jfloat x) { assert(_f2hf != nullptr, "stub is not implemented on this platform"); MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXExec, Thread::current());) // About to call into code cache @@ -497,39 +391,10 @@ class StubRoutines: AllStatic { return ((hf2f_stub_t)_hf2f)(x); } - static address method_entry_barrier() { return _method_entry_barrier; } - - static address cont_thaw() { return _cont_thaw; } - static address cont_returnBarrier() { return _cont_returnBarrier; } - static address cont_returnBarrierExc(){return _cont_returnBarrierExc; } - static address cont_preempt_stub() { return _cont_preempt_stub; } - - static address upcall_stub_exception_handler() { - assert(_upcall_stub_exception_handler != nullptr, "not implemented"); - return _upcall_stub_exception_handler; - } - - static address upcall_stub_load_target() { - assert(_upcall_stub_load_target != nullptr, "not implemented"); - return _upcall_stub_load_target; - } - - static address lookup_secondary_supers_table_stub(u1 slot) { - assert(slot < Klass::SECONDARY_SUPERS_TABLE_SIZE, "out of bounds"); - 
assert(_lookup_secondary_supers_table_stubs[slot] != nullptr, "not implemented"); - return _lookup_secondary_supers_table_stubs[slot]; - } - - static address lookup_secondary_supers_table_slow_path_stub() { - assert(_lookup_secondary_supers_table_slow_path_stub != nullptr, "not implemented"); - return _lookup_secondary_supers_table_slow_path_stub; - } - static address select_fill_function(BasicType t, bool aligned, const char* &name); - // - // Default versions of the above arraycopy functions for platforms which do - // not have specialized versions + // Default versions of some of the arraycopy functions for platforms + // which do not have specialized versions // static void jbyte_copy (jbyte* src, jbyte* dest, size_t count); static void jshort_copy (jshort* src, jshort* dest, size_t count); @@ -544,6 +409,7 @@ class StubRoutines: AllStatic { static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); + }; #endif // SHARE_RUNTIME_STUBROUTINES_HPP diff --git a/src/hotspot/share/utilities/nativeCallStack.cpp b/src/hotspot/share/utilities/nativeCallStack.cpp index c1f9a88bf0d6f..7b6d4a3660b59 100644 --- a/src/hotspot/share/utilities/nativeCallStack.cpp +++ b/src/hotspot/share/utilities/nativeCallStack.cpp @@ -113,6 +113,6 @@ void NativeCallStack::print_on(outputStream* out) const { DEBUG_ONLY(assert_not_fake();) for (int i = 0; i < NMT_TrackingStackDepth && _stack[i] != nullptr; i++) { print_frame(out, _stack[i]); + out->cr(); } - out->cr(); } diff --git a/src/hotspot/share/utilities/rbTree.hpp b/src/hotspot/share/utilities/rbTree.hpp new file mode 100644 index 0000000000000..9192c7b60115f --- /dev/null +++ b/src/hotspot/share/utilities/rbTree.hpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_UTILITIES_RBTREE_HPP +#define SHARE_UTILITIES_RBTREE_HPP + +#include "nmt/memTag.hpp" +#include "runtime/os.hpp" +#include "utilities/globalDefinitions.hpp" +#include + +// COMPARATOR must have a static function `cmp(a,b)` which returns: +// - an int < 0 when a < b +// - an int == 0 when a == b +// - an int > 0 when a > b +// ALLOCATOR must check for oom and exit, as RBTree currently does not handle the +// allocation failing. +// Key needs to be of a type that is trivially destructible. +// The tree will call a value's destructor when its node is removed. +// Nodes are address stable and will not change during its lifetime. + +template +class RBTree { + friend class RBTreeTest; + +private: + ALLOCATOR _allocator; + size_t _num_nodes; + +public: + class RBNode { + friend RBTree; + friend class RBTreeTest; + + private: + uintptr_t _parent; // LSB encodes color information. 
0 = RED, 1 = BLACK + RBNode* _left; + RBNode* _right; + + const K _key; + V _value; + + DEBUG_ONLY(bool _visited); + + public: + const K& key() const { return _key; } + V& val() { return _value; } + const V& val() const { return _value; } + + private: + bool is_black() const { return (_parent & 0x1) != 0; } + bool is_red() const { return (_parent & 0x1) == 0; } + + void set_black() { _parent |= 0x1; } + void set_red() { _parent &= ~0x1; } + + RBNode* parent() const { return (RBNode*)(_parent & ~0x1); } + void set_parent(RBNode* new_parent) { _parent = (_parent & 0x1) | (uintptr_t)new_parent; } + + RBNode(const K& key, const V& val DEBUG_ONLY(COMMA bool visited)) + : _parent(0), _left(nullptr), _right(nullptr), + _key(key), _value(val) DEBUG_ONLY(COMMA _visited(visited)) {} + + bool is_right_child() const { + return parent() != nullptr && parent()->_right == this; + } + + bool is_left_child() const { + return parent() != nullptr && parent()->_left == this; + } + + void replace_child(RBNode* old_child, RBNode* new_child); + + // This node down, right child up + // Returns right child (now parent) + RBNode* rotate_left(); + + // This node down, left child up + // Returns left child (now parent) + RBNode* rotate_right(); + + RBNode* prev(); + + RBNode* next(); + + #ifdef ASSERT + void verify(size_t& num_nodes, size_t& black_nodes_until_leaf, + size_t& shortest_leaf_path, size_t& longest_leaf_path, + size_t& tree_depth, bool expect_visited); +#endif // ASSERT + }; + +private: + RBNode* _root; + DEBUG_ONLY(bool _expected_visited); + + RBNode* allocate_node(const K& key, const V& val) { + void* node_place = _allocator.allocate(sizeof(RBNode)); + assert(node_place != nullptr, "rb-tree allocator must exit on failure"); + _num_nodes++; + return new (node_place) RBNode(key, val DEBUG_ONLY(COMMA _expected_visited)); + } + + void free_node(RBNode* node) { + node->_value.~V(); + _allocator.free(node); + _num_nodes--; + } + + // True if node is black (nil nodes count as black) + 
static inline bool is_black(const RBNode* node) { + return node == nullptr || node->is_black(); + } + + static inline bool is_red(const RBNode* node) { + return node != nullptr && node->is_red(); + } + + + // If the node with key k already exist, the value is updated instead. + RBNode* insert_node(const K& key, const V& val); + + void fix_insert_violations(RBNode* node); + + void remove_black_leaf(RBNode* node); + + // Assumption: node has at most one child. Two children is handled in `remove()` + void remove_from_tree(RBNode* node); + +public: + NONCOPYABLE(RBTree); + + RBTree() : _allocator(), _num_nodes(0), _root(nullptr) DEBUG_ONLY(COMMA _expected_visited(false)) { + static_assert(std::is_trivially_destructible::value, "key type must be trivially destructable"); + } + ~RBTree() { this->remove_all(); } + + size_t size() { return _num_nodes; } + + // Inserts a node with the given k/v into the tree, + // if the key already exist, the value is updated instead. + void upsert(const K& key, const V& val) { + RBNode* node = insert_node(key, val); + fix_insert_violations(node); + } + + // Removes the node with the given key from the tree if it exists. + // Returns true if the node was successfully removed, false otherwise. + bool remove(const K& key) { + RBNode* node = find_node(key); + if (node == nullptr){ + return false; + } + remove(node); + return true; + } + + // Removes the given node from the tree. node must be a valid node + void remove(RBNode* node); + + // Removes all existing nodes from the tree. 
+ void remove_all() { + RBNode* to_delete[64]; + int stack_idx = 0; + to_delete[stack_idx++] = _root; + + while (stack_idx > 0) { + RBNode* head = to_delete[--stack_idx]; + if (head == nullptr) continue; + to_delete[stack_idx++] = head->_left; + to_delete[stack_idx++] = head->_right; + free_node(head); + } + _num_nodes = 0; + _root = nullptr; + } + + // Finds the node with the closest key <= the given key + const RBNode* closest_leq(const K& key) const { + RBNode* candidate = nullptr; + RBNode* pos = _root; + while (pos != nullptr) { + const int cmp_r = COMPARATOR::cmp(pos->key(), key); + if (cmp_r == 0) { // Exact match + candidate = pos; + break; // Can't become better than that. + } + if (cmp_r < 0) { + // Found a match, try to find a better one. + candidate = pos; + pos = pos->_right; + } else { + pos = pos->_left; + } + } + return candidate; + } + + // Finds the node with the closest key > the given key + const RBNode* closest_gt(const K& key) const { + RBNode* candidate = nullptr; + RBNode* pos = _root; + while (pos != nullptr) { + const int cmp_r = COMPARATOR::cmp(pos->key(), key); + if (cmp_r > 0) { + // Found a match, try to find a better one. + candidate = pos; + pos = pos->_left; + } else { + pos = pos->_right; + } + } + return candidate; + } + + // Finds the node with the closest key >= the given key + const RBNode* closest_geq(const K& key) const { + RBNode* candidate = nullptr; + RBNode* pos = _root; + while (pos != nullptr) { + const int cmp_r = COMPARATOR::cmp(pos->key(), key); + if (cmp_r == 0) { // Exact match + candidate = pos; + break; // Can't become better than that. + } + if (cmp_r > 0) { + // Found a match, try to find a better one. 
+ candidate = pos; + pos = pos->_left; + } else { + pos = pos->_right; + } + } + return candidate; + } + + RBNode* closest_leq(const K& key) { + return const_cast( + static_cast*>(this)->closest_leq(key)); + } + + RBNode* closest_gt(const K& key) { + return const_cast( + static_cast*>(this)->closest_gt(key)); + } + + RBNode* closest_geq(const K& key) { + return const_cast( + static_cast*>(this)->closest_geq(key)); + } + + struct Range { + RBNode* start; + RBNode* end; + Range(RBNode* start, RBNode* end) + : start(start), end(end) {} + }; + + // Return the range [start, end) + // where start->key() <= addr < end->key(). + // Failure to find the range leads to start and/or end being null. + Range find_enclosing_range(K key) { + RBNode* start = closest_leq(key); + RBNode* end = closest_gt(key); + return Range(start, end); + } + + // Finds the node associated with the key + const RBNode* find_node(const K& key) const; + + RBNode* find_node(const K& key) { + return const_cast( + static_cast*>(this)->find_node(key)); + } + + // Finds the value associated with the key + V* find(const K& key) { + RBNode* node = find_node(key); + return node == nullptr ? nullptr : &node->val(); + } + + const V* find(const K& key) const { + const RBNode* node = find_node(key); + return node == nullptr ? nullptr : &node->val(); + } + + // Visit all RBNodes in ascending order, calling f on each node. + template + void visit_in_order(F f) const; + + // Visit all RBNodes in ascending order whose keys are in range [from, to), calling f on each node. 
+ template + void visit_range_in_order(const K& from, const K& to, F f); + +#ifdef ASSERT + // Verifies that the tree is correct and holds rb-properties + void verify_self(); +#endif // ASSERT + +}; + +template +class RBTreeCHeapAllocator { +public: + void* allocate(size_t sz) { + void* allocation = os::malloc(sz, mem_tag); + if (allocation == nullptr) { + vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, + "red-black tree failed allocation"); + } + return allocation; + } + + void free(void* ptr) { os::free(ptr); } +}; + +template +using RBTreeCHeap = RBTree>; + +#endif // SHARE_UTILITIES_RBTREE_HPP diff --git a/src/hotspot/share/utilities/rbTree.inline.hpp b/src/hotspot/share/utilities/rbTree.inline.hpp new file mode 100644 index 0000000000000..95aeb34913feb --- /dev/null +++ b/src/hotspot/share/utilities/rbTree.inline.hpp @@ -0,0 +1,541 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_UTILITIES_RBTREE_INLINE_HPP +#define SHARE_UTILITIES_RBTREE_INLINE_HPP + +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" +#include "utilities/rbTree.hpp" + +template +inline void RBTree::RBNode::replace_child( + RBNode* old_child, RBNode* new_child) { + if (_left == old_child) { + _left = new_child; + } else if (_right == old_child) { + _right = new_child; + } else { + ShouldNotReachHere(); + } +} + +template +inline typename RBTree::RBNode* +RBTree::RBNode::rotate_left() { + // This node down, right child up + RBNode* old_right = _right; + + _right = old_right->_left; + if (_right != nullptr) { + _right->set_parent(this); + } + + old_right->set_parent(parent()); + if (parent() != nullptr) { + parent()->replace_child(this, old_right); + } + + old_right->_left = this; + set_parent(old_right); + + return old_right; +} + +template +inline typename RBTree::RBNode* +RBTree::RBNode::rotate_right() { + // This node down, left child up + RBNode* old_left = _left; + + _left = old_left->_right; + if (_left != nullptr) { + _left->set_parent(this); + } + + old_left->set_parent(parent()); + if (parent() != nullptr) { + parent()->replace_child(this, old_left); + } + + old_left->_right = this; + set_parent(old_left); + + return old_left; +} + +template +inline typename RBTree::RBNode* +RBTree::RBNode::prev() { + RBNode* node = this; + if (_left != nullptr) { // left subtree exists + node = _left; + while (node->_right != nullptr) { + node = node->_right; + } + return node; + } + + while (node != nullptr && node->is_left_child()) { + node = node->parent(); + } + return node->parent(); +} + +template +inline typename RBTree::RBNode* +RBTree::RBNode::next() { + RBNode* node = this; + if (_right != nullptr) { // right subtree exists + node = _right; + while (node->_left != nullptr) { + node = node->_left; + } + return node; + } + + while (node != nullptr && node->is_right_child()) { + node = 
node->parent(); + } + return node->parent(); +} + +#ifdef ASSERT +template +inline void RBTree::RBNode::verify( + size_t& num_nodes, size_t& black_nodes_until_leaf, size_t& shortest_leaf_path, size_t& longest_leaf_path, + size_t& tree_depth, bool expect_visited) { + assert(expect_visited != _visited, "node already visited"); + _visited = !_visited; + + size_t num_black_nodes_left = 0; + size_t shortest_leaf_path_left = 0; + size_t longest_leaf_path_left = 0; + size_t tree_depth_left = 0; + + if (_left != nullptr) { + if (_right == nullptr) { + assert(is_black() && _left->is_red(), "if one child it must be red and node black"); + } + assert(COMPARATOR::cmp(_left->key(), _key) < 0, "left node must be less than parent"); + assert(is_black() || _left->is_black(), "2 red nodes in a row"); + assert(_left->parent() == this, "pointer mismatch"); + _left->verify(num_nodes, num_black_nodes_left, shortest_leaf_path_left, + longest_leaf_path_left, tree_depth_left, expect_visited); + } + + size_t num_black_nodes_right = 0; + size_t shortest_leaf_path_right = 0; + size_t longest_leaf_path_right = 0; + size_t tree_depth_right = 0; + + if (_right != nullptr) { + if (_left == nullptr) { + assert(is_black() && _right->is_red(), "if one child it must be red and node black"); + } + assert(COMPARATOR::cmp(_right->key(), _key) > 0, "right node must be greater than parent"); + assert(is_black() || _right->is_black(), "2 red nodes in a row"); + assert(_right->parent() == this, "pointer mismatch"); + _right->verify(num_nodes, num_black_nodes_right, shortest_leaf_path_right, + longest_leaf_path_right, tree_depth_right, expect_visited); + } + + shortest_leaf_path = MIN2(shortest_leaf_path_left, shortest_leaf_path_right); + longest_leaf_path = MAX2(longest_leaf_path_left, longest_leaf_path_right); + + assert(shortest_leaf_path <= longest_leaf_path && longest_leaf_path <= shortest_leaf_path * 2, + "tree imbalanced, shortest path: %zu longest: %zu", shortest_leaf_path, longest_leaf_path); + 
assert(num_black_nodes_left == num_black_nodes_right, + "number of black nodes in left/right subtree should match"); + + num_nodes++; + tree_depth = 1 + MAX2(tree_depth_left, tree_depth_right); + + shortest_leaf_path++; + longest_leaf_path++; + + black_nodes_until_leaf = num_black_nodes_left; + if (is_black()) { + black_nodes_until_leaf++; + } + +} + +#endif // ASSERT + +template +inline const typename RBTree::RBNode* +RBTree::find_node(const K& key) const { + RBNode* curr = _root; + while (curr != nullptr) { + const int key_cmp_k = COMPARATOR::cmp(key, curr->key()); + + if (key_cmp_k == 0) { + return curr; + } else if (key_cmp_k < 0) { + curr = curr->_left; + } else { + curr = curr->_right; + } + } + + return nullptr; +} + +template +inline typename RBTree::RBNode* +RBTree::insert_node(const K& key, const V& val) { + RBNode* curr = _root; + if (curr == nullptr) { // Tree is empty + _root = allocate_node(key, val); + return _root; + } + + RBNode* parent = nullptr; + while (curr != nullptr) { + const int key_cmp_k = COMPARATOR::cmp(key, curr->key()); + + if (key_cmp_k == 0) { + curr->_value = val; + return curr; + } + + parent = curr; + if (key_cmp_k < 0) { + curr = curr->_left; + } else { + curr = curr->_right; + } + } + + // Create and insert new node + RBNode* node = allocate_node(key, val); + node->set_parent(parent); + + const int key_cmp_k = COMPARATOR::cmp(key, parent->key()); + if (key_cmp_k < 0) { + parent->_left = node; + } else { + parent->_right = node; + } + + return node; +} + +template +inline void RBTree::fix_insert_violations(RBNode* node) { + if (node->is_black()) { // node's value was updated + return; // Tree is already correct + } + + RBNode* parent = node->parent(); + while (parent != nullptr && parent->is_red()) { + // Node and parent are both red, creating a red-violation + + RBNode* grandparent = parent->parent(); + if (grandparent == nullptr) { // Parent is the tree root + assert(parent == _root, "parent must be root"); + 
parent->set_black(); // Color parent black to eliminate the red-violation + return; + } + + RBNode* uncle = parent->is_left_child() ? grandparent->_right : grandparent->_left; + if (is_black(uncle)) { // Parent is red, uncle is black + // Rotate the parent to the position of the grandparent + if (parent->is_left_child()) { + if (node->is_right_child()) { // Node is an "inner" node + // Rotate and swap node and parent to make it an "outer" node + parent->rotate_left(); + parent = node; + } + grandparent->rotate_right(); // Rotate the parent to the position of the grandparent + } else if (parent->is_right_child()) { + if (node->is_left_child()) { // Node is an "inner" node + // Rotate and swap node and parent to make it an "outer" node + parent->rotate_right(); + parent = node; + } + grandparent->rotate_left(); // Rotate the parent to the position of the grandparent + } + + // Swap parent and grandparent colors to eliminate the red-violation + parent->set_black(); + grandparent->set_red(); + + if (_root == grandparent) { + _root = parent; + } + + return; + } + + // Parent and uncle are both red + // Paint both black, paint grandparent red to not create a black-violation + parent->set_black(); + uncle->set_black(); + grandparent->set_red(); + + // Move up two levels to check for new potential red-violation + node = grandparent; + parent = grandparent->parent(); + } +} + +template +inline void RBTree::remove_black_leaf(RBNode* node) { + // Black node removed, balancing needed + RBNode* parent = node->parent(); + while (parent != nullptr) { + // Sibling must exist. If it did not, node would need to be red to not break + // tree properties, and could be trivially removed before reaching here + RBNode* sibling = node->is_left_child() ? 
parent->_right : parent->_left; + if (is_red(sibling)) { // Sibling red, parent and nephews must be black + assert(is_black(parent), "parent must be black"); + assert(is_black(sibling->_left), "nephew must be black"); + assert(is_black(sibling->_right), "nephew must be black"); + // Swap parent and sibling colors + parent->set_red(); + sibling->set_black(); + + // Rotate parent down and sibling up + if (node->is_left_child()) { + parent->rotate_left(); + sibling = parent->_right; + } else { + parent->rotate_right(); + sibling = parent->_left; + } + + if (_root == parent) { + _root = parent->parent(); + } + // Further balancing needed + } + + RBNode* close_nephew = node->is_left_child() ? sibling->_left : sibling->_right; + RBNode* distant_nephew = node->is_left_child() ? sibling->_right : sibling->_left; + if (is_red(distant_nephew) || is_red(close_nephew)) { + if (is_black(distant_nephew)) { // close red, distant black + // Rotate sibling down and inner nephew up + if (node->is_left_child()) { + sibling->rotate_right(); + } else { + sibling->rotate_left(); + } + + distant_nephew = sibling; + sibling = close_nephew; + + distant_nephew->set_red(); + sibling->set_black(); + } + + // Distant nephew red + // Rotate parent down and sibling up + if (node->is_left_child()) { + parent->rotate_left(); + } else { + parent->rotate_right(); + } + if (_root == parent) { + _root = sibling; + } + + // Swap parent and sibling colors + if (parent->is_black()) { + sibling->set_black(); + } else { + sibling->set_red(); + } + parent->set_black(); + + // Color distant nephew black to restore black balance + distant_nephew->set_black(); + return; + } + + if (is_red(parent)) { // parent red, sibling and nephews black + // Swap parent and sibling colors to restore black balance + sibling->set_red(); + parent->set_black(); + return; + } + + // Parent, sibling, and both nephews black + // Color sibling red and move up one level + sibling->set_red(); + node = parent; + parent = 
node->parent(); + } +} + +template +inline void RBTree::remove_from_tree(RBNode* node) { + RBNode* parent = node->parent(); + RBNode* left = node->_left; + RBNode* right = node->_right; + if (left != nullptr) { // node has a left only-child + // node must be black, and child red, otherwise a black-violation would + // exist Remove node and color the child black. + assert(right == nullptr, "right must be nullptr"); + assert(is_black(node), "node must be black"); + assert(is_red(left), "child must be red"); + left->set_black(); + left->set_parent(parent); + if (parent == nullptr) { + assert(node == _root, "node must be root"); + _root = left; + } else { + parent->replace_child(node, left); + } + } else if (right != nullptr) { // node has a right only-child + // node must be black, and child red, otherwise a black-violation would + // exist Remove node and color the child black. + assert(left == nullptr, "left must be nullptr"); + assert(is_black(node), "node must be black"); + assert(is_red(right), "child must be red"); + right->set_black(); + right->set_parent(parent); + if (parent == nullptr) { + assert(node == _root, "node must be root"); + _root = right; + } else { + parent->replace_child(node, right); + } + } else { // node has no children + if (node == _root) { // Tree empty + _root = nullptr; + } else { + if (is_black(node)) { + // Removed node is black, creating a black imbalance + remove_black_leaf(node); + } + parent->replace_child(node, nullptr); + } + } +} + +template +inline void RBTree::remove(RBNode* node) { + assert(node != nullptr, "must be"); + + if (node->_left != nullptr && node->_right != nullptr) { // node has two children + // Swap place with the in-order successor and delete there instead + RBNode* curr = node->_right; + while (curr->_left != nullptr) { + curr = curr->_left; + } + + if (_root == node) _root = curr; + + swap(curr->_left, node->_left); + swap(curr->_parent, node->_parent); // Swaps parent and color + + // If node is curr's 
parent, parent and right pointers become invalid + if (node->_right == curr) { + node->_right = curr->_right; + node->set_parent(curr); + curr->_right = node; + } else { + swap(curr->_right, node->_right); + node->parent()->replace_child(curr, node); + curr->_right->set_parent(curr); + } + + if (curr->parent() != nullptr) curr->parent()->replace_child(node, curr); + curr->_left->set_parent(curr); + + + if (node->_left != nullptr) node->_left->set_parent(node); + if (node->_right != nullptr) node->_right->set_parent(node); + } + + remove_from_tree(node); + free_node(node); +} + +template +template +inline void RBTree::visit_in_order(F f) const { + RBNode* to_visit[64]; + int stack_idx = 0; + RBNode* head = _root; + while (stack_idx > 0 || head != nullptr) { + while (head != nullptr) { + to_visit[stack_idx++] = head; + head = head->_left; + } + head = to_visit[--stack_idx]; + f(head); + head = head->_right; + } +} + +template +template +inline void RBTree::visit_range_in_order(const K& from, const K& to, F f) { + assert(COMPARATOR::cmp(from, to) <= 0, "from must be less or equal to to"); + RBNode* curr = closest_geq(from); + if (curr == nullptr) return; + RBNode* end = closest_geq(to); + + while (curr != nullptr && curr != end) { + f(curr); + curr = curr->next(); + } +} + +#ifdef ASSERT +template +inline void RBTree::verify_self() { + if (_root == nullptr) { + assert(_num_nodes == 0, "rbtree has nodes but no root"); + return; + } + + assert(_root->parent() == nullptr, "root of rbtree has a parent"); + + size_t num_nodes = 0; + size_t black_depth = 0; + size_t tree_depth = 0; + size_t shortest_leaf_path = 0; + size_t longest_leaf_path = 0; + _expected_visited = !_expected_visited; + + _root->verify(num_nodes, black_depth, shortest_leaf_path, longest_leaf_path, tree_depth, _expected_visited); + + const unsigned int maximum_depth = log2i(size() + 1) * 2; + + assert(shortest_leaf_path <= longest_leaf_path && longest_leaf_path <= shortest_leaf_path * 2, + "tree 
imbalanced, shortest path: %zu longest: %zu", + shortest_leaf_path, longest_leaf_path); + assert(tree_depth <= maximum_depth, "rbtree is too deep"); + assert(size() == num_nodes, + "unexpected number of nodes in rbtree. expected: %zu" + ", actual: %zu", size(), num_nodes); +} +#endif // ASSERT + +#endif // SHARE_UTILITIES_RBTREE_INLINE_HPP diff --git a/src/java.base/share/classes/java/lang/StringLatin1.java b/src/java.base/share/classes/java/lang/StringLatin1.java index 342439d13416f..264664e20e9b2 100644 --- a/src/java.base/share/classes/java/lang/StringLatin1.java +++ b/src/java.base/share/classes/java/lang/StringLatin1.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -715,7 +715,7 @@ static Stream lines(byte[] value) { static void putCharsAt(byte[] val, int index, int c1, int c2, int c3, int c4) { assert index >= 0 && index + 3 < length(val) : "Trusted caller missed bounds check"; // Don't use the putChar method, Its instrinsic will cause C2 unable to combining values into larger stores. - long offset = (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index; + long offset = Unsafe.ARRAY_BYTE_BASE_OFFSET + index; UNSAFE.putByte(val, offset , (byte)(c1)); UNSAFE.putByte(val, offset + 1, (byte)(c2)); UNSAFE.putByte(val, offset + 2, (byte)(c3)); @@ -725,7 +725,7 @@ static void putCharsAt(byte[] val, int index, int c1, int c2, int c3, int c4) { static void putCharsAt(byte[] val, int index, int c1, int c2, int c3, int c4, int c5) { assert index >= 0 && index + 4 < length(val) : "Trusted caller missed bounds check"; // Don't use the putChar method, Its instrinsic will cause C2 unable to combining values into larger stores. 
- long offset = (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index; + long offset = Unsafe.ARRAY_BYTE_BASE_OFFSET + index; UNSAFE.putByte(val, offset , (byte)(c1)); UNSAFE.putByte(val, offset + 1, (byte)(c2)); UNSAFE.putByte(val, offset + 2, (byte)(c3)); diff --git a/src/java.base/share/classes/java/lang/invoke/CallSite.java b/src/java.base/share/classes/java/lang/invoke/CallSite.java index aa2a9e703f8c6..e9e3477f96fd8 100644 --- a/src/java.base/share/classes/java/lang/invoke/CallSite.java +++ b/src/java.base/share/classes/java/lang/invoke/CallSite.java @@ -138,12 +138,6 @@ abstract sealed class CallSite permits ConstantCallSite, MutableCallSite, Volati UNSAFE.storeStoreFence(); // barrier between target and isFrozen updates } - /** - * {@code CallSite} dependency context. - * JVM uses CallSite.context to store nmethod dependencies on the call site target. - */ - private final MethodHandleNatives.CallSiteContext context = MethodHandleNatives.CallSiteContext.make(this); - /** * Returns the type of this call site's target. * Although targets may change, any call site's type is permanent, and can never change to an unequal type. diff --git a/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java b/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java index 0743b3362f21d..9df7d25258d8e 100644 --- a/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java +++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java @@ -71,31 +71,6 @@ static native void copyOutBootstrapArguments(Class caller, int[] indexInfo, boolean resolve, Object ifNotAvailable); - /** Represents a context to track nmethod dependencies on CallSite instance target. 
*/ - static class CallSiteContext implements Runnable { - //@Injected JVM_nmethodBucket* vmdependencies; - //@Injected jlong last_cleanup; - - static CallSiteContext make(CallSite cs) { - final CallSiteContext newContext = new CallSiteContext(); - // CallSite instance is tracked by a Cleanable which clears native - // structures allocated for CallSite context. Though the CallSite can - // become unreachable, its Context is retained by the Cleanable instance - // (which is referenced from Cleaner instance which is referenced from - // CleanerFactory class) until cleanup is performed. - CleanerFactory.cleaner().register(cs, newContext); - return newContext; - } - - @Override - public void run() { - MethodHandleNatives.clearCallSiteContext(this); - } - } - - /** Invalidate all recorded nmethods. */ - private static native void clearCallSiteContext(CallSiteContext context); - private static native void registerNatives(); static { registerNatives(); diff --git a/src/java.base/share/classes/java/lang/invoke/VarHandles.java b/src/java.base/share/classes/java/lang/invoke/VarHandles.java index 95b5a01550f34..a21092c6cf748 100644 --- a/src/java.base/share/classes/java/lang/invoke/VarHandles.java +++ b/src/java.base/share/classes/java/lang/invoke/VarHandles.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -196,7 +196,7 @@ static VarHandle makeArrayElementHandle(Class arrayClass) { Class componentType = arrayClass.getComponentType(); - int aoffset = UNSAFE.arrayBaseOffset(arrayClass); + int aoffset = (int) UNSAFE.arrayBaseOffset(arrayClass); int ascale = UNSAFE.arrayIndexScale(arrayClass); int ashift = 31 - Integer.numberOfLeadingZeros(ascale); diff --git a/src/java.base/share/classes/java/time/ZoneOffset.java b/src/java.base/share/classes/java/time/ZoneOffset.java index 48f2a2ded22a2..d93c6e2d46d0b 100644 --- a/src/java.base/share/classes/java/time/ZoneOffset.java +++ b/src/java.base/share/classes/java/time/ZoneOffset.java @@ -1,5 +1,6 @@ /* * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -85,6 +86,7 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicReferenceArray; import jdk.internal.vm.annotation.Stable; @@ -134,8 +136,10 @@ public final class ZoneOffset extends ZoneId implements TemporalAccessor, TemporalAdjuster, Comparable, Serializable { - /** Cache of time-zone offset by offset in seconds. */ - private static final ConcurrentMap SECONDS_CACHE = new ConcurrentHashMap<>(16, 0.75f, 4); + /** Cache of time-zone offset by offset in quarters. */ + private static final int SECONDS_PER_QUARTER = 15 * SECONDS_PER_MINUTE; + private static final AtomicReferenceArray QUARTER_CACHE = new AtomicReferenceArray<>(256); + /** Cache of time-zone offset by ID. 
*/ private static final ConcurrentMap ID_CACHE = new ConcurrentHashMap<>(16, 0.75f, 4); @@ -423,12 +427,14 @@ public static ZoneOffset ofTotalSeconds(int totalSeconds) { if (totalSeconds < -MAX_SECONDS || totalSeconds > MAX_SECONDS) { throw new DateTimeException("Zone offset not in valid range: -18:00 to +18:00"); } - if (totalSeconds % (15 * SECONDS_PER_MINUTE) == 0) { - Integer totalSecs = totalSeconds; - ZoneOffset result = SECONDS_CACHE.get(totalSecs); + int quarters = totalSeconds / SECONDS_PER_QUARTER; + if (totalSeconds - quarters * SECONDS_PER_QUARTER == 0) { + // quarters range from -72 to 72, & 0xff maps them to 0-72 and 184-255. + int key = quarters & 0xff; + ZoneOffset result = QUARTER_CACHE.getOpaque(key); if (result == null) { result = new ZoneOffset(totalSeconds); - var existing = SECONDS_CACHE.putIfAbsent(totalSecs, result); + var existing = QUARTER_CACHE.compareAndExchange(key, null, result); if (existing != null) { result = existing; } diff --git a/src/java.base/share/classes/java/util/Formatter.java b/src/java.base/share/classes/java/util/Formatter.java index 6ab07e14594c1..d0b4f6744d0be 100644 --- a/src/java.base/share/classes/java/util/Formatter.java +++ b/src/java.base/share/classes/java/util/Formatter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2023, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -432,7 +432,7 @@ * prefix {@code 'T'} forces this output to upper case. * * {@code 'z'} - * RFC 822 + * RFC 822 * style numeric time zone offset from GMT, e.g. {@code -0800}. This * value will be adjusted as necessary for Daylight Saving Time. 
For * {@code long}, {@link Long}, and {@link Date} the time zone used is @@ -1720,7 +1720,7 @@ * * {@code 'z'} * '\u007a' - * RFC 822 + * RFC 822 * style numeric time zone offset from GMT, e.g. {@code -0800}. This * value will be adjusted as necessary for Daylight Saving Time. For * {@code long}, {@link Long}, and {@link Date} the time zone used is diff --git a/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java b/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java index 0cabc63f36e3d..b0818d3da6eaf 100644 --- a/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java +++ b/src/java.base/share/classes/java/util/concurrent/ConcurrentHashMap.java @@ -6383,7 +6383,7 @@ public final void compute() { = U.objectFieldOffset(ConcurrentHashMap.class, "cellsBusy"); private static final long CELLVALUE = U.objectFieldOffset(CounterCell.class, "value"); - private static final int ABASE = U.arrayBaseOffset(Node[].class); + private static final long ABASE = U.arrayBaseOffset(Node[].class); private static final int ASHIFT; static { diff --git a/src/java.base/share/classes/java/util/zip/CRC32C.java b/src/java.base/share/classes/java/util/zip/CRC32C.java index b60c564e68325..36d37f3fe267e 100644 --- a/src/java.base/share/classes/java/util/zip/CRC32C.java +++ b/src/java.base/share/classes/java/util/zip/CRC32C.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -222,9 +222,9 @@ private static int updateBytes(int crc, byte[] b, int off, int end) { if (end - off >= 8 && Unsafe.ARRAY_BYTE_INDEX_SCALE == 1) { // align on 8 bytes - int alignLength + long alignLength = (8 - ((Unsafe.ARRAY_BYTE_BASE_OFFSET + off) & 0x7)) & 0x7; - for (int alignEnd = off + alignLength; off < alignEnd; off++) { + for (long alignEnd = off + alignLength; off < alignEnd; off++) { crc = (crc >>> 8) ^ byteTable[(crc ^ b[off]) & 0xFF]; } @@ -238,11 +238,11 @@ private static int updateBytes(int crc, byte[] b, int off, int end) { int secondHalf; if (Unsafe.ADDRESS_SIZE == 4) { // On 32 bit platforms read two ints instead of a single 64bit long - firstHalf = UNSAFE.getInt(b, (long)Unsafe.ARRAY_BYTE_BASE_OFFSET + off); - secondHalf = UNSAFE.getInt(b, (long)Unsafe.ARRAY_BYTE_BASE_OFFSET + off + firstHalf = UNSAFE.getInt(b, Unsafe.ARRAY_BYTE_BASE_OFFSET + off); + secondHalf = UNSAFE.getInt(b, Unsafe.ARRAY_BYTE_BASE_OFFSET + off + Integer.BYTES); } else { - long value = UNSAFE.getLong(b, (long)Unsafe.ARRAY_BYTE_BASE_OFFSET + off); + long value = UNSAFE.getLong(b, Unsafe.ARRAY_BYTE_BASE_OFFSET + off); if (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN) { firstHalf = (int) value; secondHalf = (int) (value >>> 32); diff --git a/src/java.base/share/classes/java/util/zip/ZipUtils.java b/src/java.base/share/classes/java/util/zip/ZipUtils.java index a2eb9c50d4aea..99ba96854bb2b 100644 --- a/src/java.base/share/classes/java/util/zip/ZipUtils.java +++ b/src/java.base/share/classes/java/util/zip/ZipUtils.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -174,7 +174,7 @@ public static final int get16(byte[] b, int off) { Preconditions.checkIndex(off, b.length, Preconditions.AIOOBE_FORMATTER); Preconditions.checkIndex(off + 1, b.length, Preconditions.AIOOBE_FORMATTER); return Short.toUnsignedInt( - UNSAFE.getShortUnaligned(b, off + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, false)); + UNSAFE.getShortUnaligned(b, off + Unsafe.ARRAY_BYTE_BASE_OFFSET, false)); } /** @@ -185,7 +185,7 @@ public static final long get32(byte[] b, int off) { Preconditions.checkIndex(off, b.length, Preconditions.AIOOBE_FORMATTER); Preconditions.checkIndex(off + 3, b.length, Preconditions.AIOOBE_FORMATTER); return Integer.toUnsignedLong( - UNSAFE.getIntUnaligned(b, off + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, false)); + UNSAFE.getIntUnaligned(b, off + Unsafe.ARRAY_BYTE_BASE_OFFSET, false)); } /** @@ -195,7 +195,7 @@ public static final long get32(byte[] b, int off) { public static final long get64S(byte[] b, int off) { Preconditions.checkIndex(off, b.length, Preconditions.AIOOBE_FORMATTER); Preconditions.checkIndex(off + 7, b.length, Preconditions.AIOOBE_FORMATTER); - return UNSAFE.getLongUnaligned(b, off + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, false); + return UNSAFE.getLongUnaligned(b, off + Unsafe.ARRAY_BYTE_BASE_OFFSET, false); } /** @@ -206,7 +206,7 @@ public static final long get64S(byte[] b, int off) { public static final int get32S(byte[] b, int off) { Preconditions.checkIndex(off, b.length, Preconditions.AIOOBE_FORMATTER); Preconditions.checkIndex(off + 3, b.length, Preconditions.AIOOBE_FORMATTER); - return UNSAFE.getIntUnaligned(b, off + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET, false); + return UNSAFE.getIntUnaligned(b, off + Unsafe.ARRAY_BYTE_BASE_OFFSET, false); } /* diff --git a/src/java.base/share/classes/jdk/internal/classfile/impl/RawBytecodeHelper.java b/src/java.base/share/classes/jdk/internal/classfile/impl/RawBytecodeHelper.java index 
659f41f9699ae..70b0ef6c61235 100644 --- a/src/java.base/share/classes/jdk/internal/classfile/impl/RawBytecodeHelper.java +++ b/src/java.base/share/classes/jdk/internal/classfile/impl/RawBytecodeHelper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -386,16 +386,16 @@ public int getU1Unchecked(int bci) { } public int getU2Unchecked(int bci) { - return UNSAFE.getCharUnaligned(code.array, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); + return UNSAFE.getCharUnaligned(code.array, Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); } public int getShortUnchecked(int bci) { - return UNSAFE.getShortUnaligned(code.array, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); + return UNSAFE.getShortUnaligned(code.array, Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); } // used after switch validation public int getIntUnchecked(int bci) { - return UNSAFE.getIntUnaligned(code.array, (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); + return UNSAFE.getIntUnaligned(code.array, Unsafe.ARRAY_BYTE_BASE_OFFSET + bci, true); } // non-wide branches diff --git a/src/java.base/share/classes/jdk/internal/foreign/ConfinedSession.java b/src/java.base/share/classes/jdk/internal/foreign/ConfinedSession.java index ec1e8fa7b155b..47dfc69e887dc 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/ConfinedSession.java +++ b/src/java.base/share/classes/jdk/internal/foreign/ConfinedSession.java @@ -86,11 +86,20 @@ void justClose() { * A confined resource list; no races are possible here. */ static final class ConfinedResourceList extends ResourceList { + // The first element of the list is pulled into a separate field + // which helps escape analysis keep track of the instance, allowing + // it to be scalar replaced. 
+ ResourceCleanup cache; + @Override void add(ResourceCleanup cleanup) { if (fst != ResourceCleanup.CLOSED_LIST) { - cleanup.next = fst; - fst = cleanup; + if (cache == null) { + cache = cleanup; + } else { + cleanup.next = fst; + fst = cleanup; + } } else { throw alreadyClosed(); } @@ -101,7 +110,11 @@ void cleanup() { if (fst != ResourceCleanup.CLOSED_LIST) { ResourceCleanup prev = fst; fst = ResourceCleanup.CLOSED_LIST; - cleanup(prev); + RuntimeException pendingException = null; + if (cache != null) { + pendingException = cleanupSingle(cache, pendingException); + } + cleanup(prev, pendingException); } else { throw alreadyClosed(); } diff --git a/src/java.base/share/classes/jdk/internal/foreign/HeapMemorySegmentImpl.java b/src/java.base/share/classes/jdk/internal/foreign/HeapMemorySegmentImpl.java index bd7834af7d4fb..06f82b13691b7 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/HeapMemorySegmentImpl.java +++ b/src/java.base/share/classes/jdk/internal/foreign/HeapMemorySegmentImpl.java @@ -93,7 +93,7 @@ ByteBuffer makeByteBuffer() { throw new UnsupportedOperationException("Not an address to an heap-allocated byte array"); } JavaNioAccess nioAccess = SharedSecrets.getJavaNioAccess(); - return nioAccess.newHeapByteBuffer(baseByte, (int)offset - Utils.BaseAndScale.BYTE.base(), (int) byteSize(), null); + return nioAccess.newHeapByteBuffer(baseByte, (int)(offset - Utils.BaseAndScale.BYTE.base()), (int) byteSize(), null); } // factories diff --git a/src/java.base/share/classes/jdk/internal/foreign/MemorySessionImpl.java b/src/java.base/share/classes/jdk/internal/foreign/MemorySessionImpl.java index cc2e746ce4da9..2163146f1a93e 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/MemorySessionImpl.java +++ b/src/java.base/share/classes/jdk/internal/foreign/MemorySessionImpl.java @@ -261,19 +261,13 @@ public final void run() { } static void cleanup(ResourceCleanup first) { - RuntimeException pendingException = null; + cleanup(first, null); + } + + 
static void cleanup(ResourceCleanup first, RuntimeException pendingException) { ResourceCleanup current = first; while (current != null) { - try { - current.cleanup(); - } catch (RuntimeException ex) { - if (pendingException == null) { - pendingException = ex; - } else if (ex != pendingException) { - // note: self-suppression is not supported - pendingException.addSuppressed(ex); - } - } + pendingException = cleanupSingle(current, pendingException); current = current.next; } if (pendingException != null) { @@ -281,6 +275,20 @@ static void cleanup(ResourceCleanup first) { } } + static RuntimeException cleanupSingle(ResourceCleanup resource, RuntimeException pendingException) { + try { + resource.cleanup(); + } catch (RuntimeException ex) { + if (pendingException == null) { + pendingException = ex; + } else if (ex != pendingException) { + // note: self-suppression is not supported + pendingException.addSuppressed(ex); + } + } + return pendingException; + } + public abstract static class ResourceCleanup { ResourceCleanup next; diff --git a/src/java.base/share/classes/jdk/internal/foreign/Utils.java b/src/java.base/share/classes/jdk/internal/foreign/Utils.java index 9ff1da4ff2405..391a46c092c19 100644 --- a/src/java.base/share/classes/jdk/internal/foreign/Utils.java +++ b/src/java.base/share/classes/jdk/internal/foreign/Utils.java @@ -298,7 +298,7 @@ public static String toHexString(long value) { return "0x" + Long.toHexString(value); } - public record BaseAndScale(int base, long scale) { + public record BaseAndScale(long base, long scale) { public static final BaseAndScale BYTE = new BaseAndScale(Unsafe.ARRAY_BYTE_BASE_OFFSET, Unsafe.ARRAY_BYTE_INDEX_SCALE); diff --git a/src/java.base/share/classes/jdk/internal/misc/Unsafe.java b/src/java.base/share/classes/jdk/internal/misc/Unsafe.java index e69690f876ffe..dba2e6fa7ed14 100644 --- a/src/java.base/share/classes/jdk/internal/misc/Unsafe.java +++ b/src/java.base/share/classes/jdk/internal/misc/Unsafe.java @@ -1,5 +1,5 
@@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1043,8 +1043,11 @@ private void checkWritebackEnabled() { * This constant differs from all results that will ever be returned from * {@link #staticFieldOffset}, {@link #objectFieldOffset}, * or {@link #arrayBaseOffset}. + *

    + * The static type is {@code long} to emphasize that long arithmetic should + always be used for offset calculations to avoid overflows. */ - public static final int INVALID_FIELD_OFFSET = -1; + public static final long INVALID_FIELD_OFFSET = -1; /** * Reports the location of a given field in the storage allocation of its @@ -1172,11 +1175,15 @@ public void ensureClassInitialized(Class c) { * for the same class, you may use that scale factor, together with this * base offset, to form new offsets to access elements of arrays of the * given class. + *

    + * The return value is in the range of a {@code int}. The return type is + * {@code long} to emphasize that long arithmetic should always be used + * for offset calculations to avoid overflows. * * @see #getInt(Object, long) * @see #putInt(Object, long, int) */ - public int arrayBaseOffset(Class arrayClass) { + public long arrayBaseOffset(Class arrayClass) { if (arrayClass == null) { throw new NullPointerException(); } @@ -1186,39 +1193,39 @@ public int arrayBaseOffset(Class arrayClass) { /** The value of {@code arrayBaseOffset(boolean[].class)} */ - public static final int ARRAY_BOOLEAN_BASE_OFFSET + public static final long ARRAY_BOOLEAN_BASE_OFFSET = theUnsafe.arrayBaseOffset(boolean[].class); /** The value of {@code arrayBaseOffset(byte[].class)} */ - public static final int ARRAY_BYTE_BASE_OFFSET + public static final long ARRAY_BYTE_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class); /** The value of {@code arrayBaseOffset(short[].class)} */ - public static final int ARRAY_SHORT_BASE_OFFSET + public static final long ARRAY_SHORT_BASE_OFFSET = theUnsafe.arrayBaseOffset(short[].class); /** The value of {@code arrayBaseOffset(char[].class)} */ - public static final int ARRAY_CHAR_BASE_OFFSET + public static final long ARRAY_CHAR_BASE_OFFSET = theUnsafe.arrayBaseOffset(char[].class); /** The value of {@code arrayBaseOffset(int[].class)} */ - public static final int ARRAY_INT_BASE_OFFSET + public static final long ARRAY_INT_BASE_OFFSET = theUnsafe.arrayBaseOffset(int[].class); /** The value of {@code arrayBaseOffset(long[].class)} */ - public static final int ARRAY_LONG_BASE_OFFSET + public static final long ARRAY_LONG_BASE_OFFSET = theUnsafe.arrayBaseOffset(long[].class); /** The value of {@code arrayBaseOffset(float[].class)} */ - public static final int ARRAY_FLOAT_BASE_OFFSET + public static final long ARRAY_FLOAT_BASE_OFFSET = theUnsafe.arrayBaseOffset(float[].class); /** The value of {@code arrayBaseOffset(double[].class)} */ - public static final 
int ARRAY_DOUBLE_BASE_OFFSET + public static final long ARRAY_DOUBLE_BASE_OFFSET = theUnsafe.arrayBaseOffset(double[].class); /** The value of {@code arrayBaseOffset(Object[].class)} */ - public static final int ARRAY_OBJECT_BASE_OFFSET + public static final long ARRAY_OBJECT_BASE_OFFSET = theUnsafe.arrayBaseOffset(Object[].class); /** @@ -1227,6 +1234,9 @@ public int arrayBaseOffset(Class arrayClass) { * will generally not work properly with accessors like {@link * #getByte(Object, long)}, so the scale factor for such classes is reported * as zero. + *

    + * The computation of the actual memory offset should always use {@code + * long} arithmetic to avoid overflows. * * @see #arrayBaseOffset * @see #getInt(Object, long) @@ -3840,7 +3850,7 @@ private void putShortParts(Object o, long offset, byte i0, byte i1) { private native Object staticFieldBase0(Field f); private native boolean shouldBeInitialized0(Class c); private native void ensureClassInitialized0(Class c); - private native int arrayBaseOffset0(Class arrayClass); + private native int arrayBaseOffset0(Class arrayClass); // public version returns long to promote correct arithmetic private native int arrayIndexScale0(Class arrayClass); private native int getLoadAverage0(double[] loadavg, int nelems); diff --git a/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java b/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java index a24f8389709d0..1a56c3c64fd47 100644 --- a/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java +++ b/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -457,8 +457,8 @@ public static int mismatch(boolean[] a, int aFromIndex, if (length > 7) { if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + aFromIndex; - int bOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + bFromIndex; + long aOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + aFromIndex; + long bOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + bFromIndex; i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -550,8 +550,8 @@ public static int mismatch(byte[] a, int aFromIndex, if (length > 7) { if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + aFromIndex; - int bOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + bFromIndex; + long aOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + aFromIndex; + long bOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + bFromIndex; i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -599,8 +599,8 @@ public static int mismatch(char[] a, int aFromIndex, if (length > 3) { if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE); i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -648,8 +648,8 @@ public static int mismatch(short[] a, int aFromIndex, if (length > 3) { if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_SHORT_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_SHORT_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_SHORT_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (bFromIndex << 
LOG2_ARRAY_SHORT_INDEX_SCALE); i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -697,8 +697,8 @@ public static int mismatch(int[] a, int aFromIndex, if (length > 1) { if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_INT_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_INT_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_INT_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_INT_INDEX_SCALE); i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -729,8 +729,8 @@ public static int mismatch(float[] a, int aFromIndex, int i = 0; if (length > 1) { if (Float.floatToRawIntBits(a[aFromIndex]) == Float.floatToRawIntBits(b[bFromIndex])) { - int aOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE); i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -787,8 +787,8 @@ public static int mismatch(long[] a, int aFromIndex, } if (a[aFromIndex] != b[bFromIndex]) return 0; - int aOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE); int i = vectorizedMismatch( a, aOffset, b, bOffset, @@ -813,8 +813,8 @@ public static int mismatch(double[] a, int aFromIndex, } int i = 0; if (Double.doubleToRawLongBits(a[aFromIndex]) == Double.doubleToRawLongBits(b[bFromIndex])) { - int aOffset = 
Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE); - int bOffset = Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE); + long aOffset = Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE); + long bOffset = Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE); i = vectorizedMismatch( a, aOffset, b, bOffset, diff --git a/src/java.base/share/classes/jdk/internal/util/DecimalDigits.java b/src/java.base/share/classes/jdk/internal/util/DecimalDigits.java index 2c140bfbc134a..d3df46dcd3c4d 100644 --- a/src/java.base/share/classes/jdk/internal/util/DecimalDigits.java +++ b/src/java.base/share/classes/jdk/internal/util/DecimalDigits.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,7 +62,7 @@ public final class DecimalDigits { private static final short[] DIGITS; static { - short[] digits = new short[10 * 10]; + short[] digits = new short[128]; for (int i = 0; i < 10; i++) { short hi = (short) (i + '0'); @@ -394,7 +394,7 @@ public static int getChars(long i, int index, char[] buf) { * @param v to convert */ public static void putPair(char[] buf, int charPos, int v) { - int packed = DIGITS[v]; + int packed = DIGITS[v & 0x7f]; buf[charPos ] = (char) (packed & 0xFF); buf[charPos + 1] = (char) (packed >> 8); } @@ -407,7 +407,7 @@ public static void putPair(char[] buf, int charPos, int v) { * @param v to convert */ public static void putPairLatin1(byte[] buf, int charPos, int v) { - int packed = DIGITS[v]; + int packed = DIGITS[v & 0x7f]; putCharLatin1(buf, charPos, packed & 0xFF); putCharLatin1(buf, charPos + 1, packed >> 8); } @@ -420,13 +420,13 @@ public static void putPairLatin1(byte[] buf, int 
charPos, int v) { * @param v to convert */ public static void putPairUTF16(byte[] buf, int charPos, int v) { - int packed = DIGITS[v]; + int packed = DIGITS[v & 0x7f]; putCharUTF16(buf, charPos, packed & 0xFF); putCharUTF16(buf, charPos + 1, packed >> 8); } private static void putCharLatin1(byte[] buf, int charPos, int c) { - UNSAFE.putByte(buf, ARRAY_BYTE_BASE_OFFSET + (long) charPos, (byte) c); + UNSAFE.putByte(buf, ARRAY_BYTE_BASE_OFFSET + charPos, (byte) c); } private static void putCharUTF16(byte[] buf, int charPos, int c) { diff --git a/src/java.base/share/classes/jdk/internal/util/OctalDigits.java b/src/java.base/share/classes/jdk/internal/util/OctalDigits.java deleted file mode 100644 index c41d0f755743b..0000000000000 --- a/src/java.base/share/classes/jdk/internal/util/OctalDigits.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package jdk.internal.util; - -import jdk.internal.access.JavaLangAccess; -import jdk.internal.access.SharedSecrets; -import jdk.internal.vm.annotation.Stable; - -/** - * Digits class for octal digits. - * - * @since 21 - */ -public final class OctalDigits { - private static final JavaLangAccess JLA = SharedSecrets.getJavaLangAccess(); - - @Stable - private static final short[] DIGITS; - - static { - short[] digits = new short[8 * 8]; - - for (int i = 0; i < 8; i++) { - short lo = (short) (i + '0'); - - for (int j = 0; j < 8; j++) { - short hi = (short) ((j + '0') << 8); - digits[(i << 3) + j] = (short) (hi | lo); - } - } - - DIGITS = digits; - } - - /** - * Constructor. - */ - private OctalDigits() { - } - - /** - * Insert digits for long value in buffer from high index to low index. - * - * @param value value to convert - * @param index insert point + 1 - * @param buffer byte buffer to copy into - * - * @return the last index used - */ - public static int getCharsLatin1(long value, int index, byte[] buffer){ - while ((value & ~0x3F) != 0) { - int digits = DIGITS[((int) value) & 0x3F]; - value >>>= 6; - buffer[--index] = (byte) (digits >> 8); - buffer[--index] = (byte) (digits & 0xFF); - } - - int digits = DIGITS[(int) (value & 0x3F)]; - buffer[--index] = (byte) (digits >> 8); - - if (7 < value) { - buffer[--index] = (byte) (digits & 0xFF); - } - - return index; - } - - - /** - * This is a variant of {@link OctalDigits#getCharsLatin1(long, int, byte[])}, but for - * UTF-16 coder. 
- * - * @param value value to convert - * @param index insert point + 1 - * @param buffer byte buffer to copy into - * - * @return the last index used - */ - public static int getCharsUTF16(long value, int index, byte[] buffer){ - while ((value & ~0x3F) != 0) { - int pair = (int) DIGITS[((int) value) & 0x3F]; - JLA.putCharUTF16(buffer, --index, pair >> 8); - JLA.putCharUTF16(buffer, --index, pair & 0xFF); - value >>>= 6; - } - - int digits = DIGITS[(int) (value & 0x3F)]; - JLA.putCharUTF16(buffer, --index, digits >> 8); - - if (7 < value) { - JLA.putCharUTF16(buffer, --index, digits & 0xFF); - } - - return index; - } - - /** - * Calculate the number of digits required to represent the long. - * - * @param value value to convert - * - * @return number of digits - */ - public static int stringSize(long value) { - return value == 0 ? 1 : ((66 - Long.numberOfLeadingZeros(value)) / 3); - } -} diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixUserDefinedFileAttributeView.java b/src/java.base/unix/classes/sun/nio/fs/UnixUserDefinedFileAttributeView.java index 5b8d50dabf2e8..d2295ce3cc586 100644 --- a/src/java.base/unix/classes/sun/nio/fs/UnixUserDefinedFileAttributeView.java +++ b/src/java.base/unix/classes/sun/nio/fs/UnixUserDefinedFileAttributeView.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -182,7 +182,7 @@ public int read(String name, ByteBuffer dst) throws IOException { int n = read(name, address, rem); // copy from buffer into backing array - long off = dst.arrayOffset() + pos + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET; + long off = dst.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; unsafe.copyMemory(null, address, dst.array(), off, n); dst.position(pos + n); @@ -241,7 +241,7 @@ public int write(String name, ByteBuffer src) throws IOException { if (src.hasArray()) { // copy from backing array into buffer - long off = src.arrayOffset() + pos + (long) Unsafe.ARRAY_BYTE_BASE_OFFSET; + long off = src.arrayOffset() + pos + Unsafe.ARRAY_BYTE_BASE_OFFSET; unsafe.copyMemory(src.array(), off, null, address, rem); } else { // backing array not accessible so transfer via temporary array diff --git a/src/java.desktop/unix/classes/sun/awt/UNIXToolkit.java b/src/java.desktop/unix/classes/sun/awt/UNIXToolkit.java index 5881ba55ef399..4c6b451b7e4de 100644 --- a/src/java.desktop/unix/classes/sun/awt/UNIXToolkit.java +++ b/src/java.desktop/unix/classes/sun/awt/UNIXToolkit.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,6 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; -import java.util.Arrays; import sun.awt.X11.XBaseWindow; import com.sun.java.swing.plaf.gtk.GTKConstants.TextDirection; @@ -521,6 +520,20 @@ public boolean isRunningOnWayland() { // application icons). 
private static final WindowFocusListener waylandWindowFocusListener; + private static boolean containsWaylandWindowFocusListener(Window window) { + if (window == null) { + return false; + } + + for (WindowFocusListener focusListener : window.getWindowFocusListeners()) { + if (focusListener == waylandWindowFocusListener) { + return true; + } + } + + return false; + } + static { if (isOnWayland()) { waylandWindowFocusListener = new WindowAdapter() { @@ -530,13 +543,22 @@ public void windowLostFocus(WindowEvent e) { Window oppositeWindow = e.getOppositeWindow(); // The focus can move between the window calling the popup, - // and the popup window itself. + // and the popup window itself or its children. // We only dismiss the popup in other cases. if (oppositeWindow != null) { - if (window == oppositeWindow.getParent() ) { + if (containsWaylandWindowFocusListener(oppositeWindow.getOwner())) { addWaylandWindowFocusListenerToWindow(oppositeWindow); return; } + + Window owner = window.getOwner(); + while (owner != null) { + if (owner == oppositeWindow) { + return; + } + owner = owner.getOwner(); + } + if (window.getParent() == oppositeWindow) { return; } @@ -557,11 +579,11 @@ public void windowLostFocus(WindowEvent e) { } private static void addWaylandWindowFocusListenerToWindow(Window window) { - if (!Arrays - .asList(window.getWindowFocusListeners()) - .contains(waylandWindowFocusListener) - ) { + if (!containsWaylandWindowFocusListener(window)) { window.addWindowFocusListener(waylandWindowFocusListener); + for (Window ownedWindow : window.getOwnedWindows()) { + addWaylandWindowFocusListenerToWindow(ownedWindow); + } } } diff --git a/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c b/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c index d6ff51f801866..356b52dfb3487 100644 --- a/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c +++ b/src/java.desktop/unix/native/libawt/awt/awt_LoadLibrary.c @@ -43,6 +43,12 @@ #define VERBOSE_AWT_DEBUG #endif 
+#define CHECK_EXCEPTION_FATAL(env, message) \ + if ((*env)->ExceptionCheck(env)) { \ + (*env)->ExceptionDescribe(env); \ + (*env)->FatalError(env, message); \ + } + static void *awtHandle = NULL; typedef jint JNICALL JNI_OnLoad_type(JavaVM *vm, void *reserved); @@ -61,16 +67,13 @@ JNIEXPORT jboolean JNICALL AWTIsHeadless() { env = (JNIEnv *)JNU_GetEnv(jvm, JNI_VERSION_1_2); graphicsEnvClass = (*env)->FindClass(env, "java/awt/GraphicsEnvironment"); - if (graphicsEnvClass == NULL) { - return JNI_TRUE; - } + CHECK_EXCEPTION_FATAL(env, "FindClass java/awt/GraphicsEnvironment failed"); headlessFn = (*env)->GetStaticMethodID(env, graphicsEnvClass, "isHeadless", "()Z"); - if (headlessFn == NULL) { - return JNI_TRUE; - } + CHECK_EXCEPTION_FATAL(env, "GetStaticMethodID isHeadless failed"); isHeadless = (*env)->CallStaticBooleanMethod(env, graphicsEnvClass, headlessFn); + // If an exception occurred, we assume headless mode and carry on. if ((*env)->ExceptionCheck(env)) { (*env)->ExceptionClear(env); return JNI_TRUE; @@ -79,12 +82,6 @@ JNIEXPORT jboolean JNICALL AWTIsHeadless() { return isHeadless; } -#define CHECK_EXCEPTION_FATAL(env, message) \ - if ((*env)->ExceptionCheck(env)) { \ - (*env)->ExceptionClear(env); \ - (*env)->FatalError(env, message); \ - } - /* * Pathnames to the various awt toolkits */ diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java index e53cc798e61d0..790dc85166b62 100644 --- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java +++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsProgressBarUI.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ import java.awt.Graphics2D; import java.awt.Insets; import java.awt.Rectangle; +import java.awt.geom.AffineTransform; import javax.swing.JComponent; import javax.swing.JProgressBar; @@ -128,38 +129,43 @@ protected void paintDeterminate(Graphics g, JComponent c) { if (xp != null) { boolean vertical = (progressBar.getOrientation() == JProgressBar.VERTICAL); boolean isLeftToRight = WindowsGraphicsUtils.isLeftToRight(c); - int barRectWidth = progressBar.getWidth(); - int barRectHeight = progressBar.getHeight()-1; + Graphics2D g2 = (Graphics2D) g; + AffineTransform at = g2.getTransform(); + double scaleX = at.getScaleX(); + double scaleY = at.getScaleY(); + + int barRectWidth = (int)Math.ceil(progressBar.getWidth() * scaleX); + int barRectHeight = (int)Math.ceil(progressBar.getHeight() * scaleY); + // amount of progress to draw - int amountFull = getAmountFull(null, barRectWidth, barRectHeight); + int amountFull = (int)(getAmountFull(null, barRectWidth, barRectHeight) / scaleX); paintXPBackground(g, vertical, barRectWidth, barRectHeight); + // Paint progress if (progressBar.isStringPainted()) { // Do not paint the standard stripes from the skin, because they obscure // the text g.setColor(progressBar.getForeground()); - barRectHeight -= 2; - barRectWidth -= 2; if (barRectWidth <= 0 || barRectHeight <= 0) { return; } - Graphics2D g2 = (Graphics2D)g; g2.setStroke(new BasicStroke((float)(vertical ? 
barRectWidth : barRectHeight), BasicStroke.CAP_BUTT, BasicStroke.JOIN_BEVEL)); if (!vertical) { if (isLeftToRight) { - g2.drawLine(2, barRectHeight / 2 + 1, - amountFull - 2, barRectHeight / 2 + 1); + g2.drawLine(0, barRectHeight / 2, + amountFull, barRectHeight / 2); } else { g2.drawLine(2 + barRectWidth, barRectHeight / 2 + 1, 2 + barRectWidth - (amountFull - 2), barRectHeight / 2 + 1); } - paintString(g, 0, 0, barRectWidth, barRectHeight, amountFull, null); + paintString(g, 0, 0, (int)(barRectWidth / scaleX), + (int)(barRectHeight / scaleY), amountFull, null); } else { g2.drawLine(barRectWidth/2 + 1, barRectHeight + 1, barRectWidth/2 + 1, barRectHeight + 1 - amountFull + 2); diff --git a/src/java.management/share/classes/java/lang/management/ManagementPermission.java b/src/java.management/share/classes/java/lang/management/ManagementPermission.java index 2dbee65e5cb72..7c9958d72cb05 100644 --- a/src/java.management/share/classes/java/lang/management/ManagementPermission.java +++ b/src/java.management/share/classes/java/lang/management/ManagementPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ * @apiNote * This permission cannot be used for controlling access to resources * as the Security Manager is no longer supported. + * Consequently this class is deprecated for removal in a future release. * * @author Mandy Chung * @since 1.5 @@ -41,8 +42,11 @@ * @see java.security.PermissionCollection * @see java.lang.SecurityManager * + * @deprecated This class was only useful in conjunction with the Security Manager, + * which is no longer supported. There is no replacement for this class. 
+ * */ - +@Deprecated(since="25", forRemoval=true) public final class ManagementPermission extends java.security.BasicPermission { private static final long serialVersionUID = 1897496590799378737L; diff --git a/src/java.management/share/classes/javax/management/MBeanPermission.java b/src/java.management/share/classes/javax/management/MBeanPermission.java index 46717846cb3c9..abc3cdda7a89c 100644 --- a/src/java.management/share/classes/javax/management/MBeanPermission.java +++ b/src/java.management/share/classes/javax/management/MBeanPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -161,9 +161,14 @@ * @apiNote * This permission cannot be used for controlling access to resources * as the Security Manager is no longer supported. + * Consequently this class is deprecated for removal in a future release. + * + * @deprecated This class was only useful in conjunction with the Security Manager, + * which is no longer supported. There is no replacement for this class. * * @since 1.5 */ +@Deprecated(since="25", forRemoval=true) public class MBeanPermission extends Permission { private static final long serialVersionUID = -2416928705275160661L; diff --git a/src/java.management/share/classes/javax/management/MBeanServerPermission.java b/src/java.management/share/classes/javax/management/MBeanServerPermission.java index dc77d605912b2..c59c3e5ec3567 100644 --- a/src/java.management/share/classes/javax/management/MBeanServerPermission.java +++ b/src/java.management/share/classes/javax/management/MBeanServerPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,9 +67,14 @@ * @apiNote * This permission cannot be used for controlling access to resources * as the Security Manager is no longer supported. + * Consequently this class is deprecated for removal in a future release. + * + * @deprecated This class was only useful in conjunction with the Security Manager, + * which is no longer supported. There is no replacement for this class. * * @since 1.5 */ +@Deprecated(since="25", forRemoval=true) public class MBeanServerPermission extends BasicPermission { private static final long serialVersionUID = -5661980843569388590L; @@ -334,6 +339,7 @@ public PermissionCollection newPermissionCollection() { * implementation from defining a PermissionCollection there with an * optimized "implies" method. */ +@SuppressWarnings("removal") class MBeanServerPermissionCollection extends PermissionCollection { /** @serial Null if no permissions in collection, otherwise a single permission that is the union of all permissions that diff --git a/src/java.management/share/classes/javax/management/MBeanTrustPermission.java b/src/java.management/share/classes/javax/management/MBeanTrustPermission.java index d961fe471930a..2929b5c6097be 100644 --- a/src/java.management/share/classes/javax/management/MBeanTrustPermission.java +++ b/src/java.management/share/classes/javax/management/MBeanTrustPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,9 +44,14 @@ * @apiNote * This permission cannot be used for controlling access to resources * as the Security Manager is no longer supported. 
+ * Consequently this class is deprecated for removal in a future release. + * + * @deprecated This class was only useful in conjunction with the Security Manager, + * which is no longer supported. There is no replacement for this class. * * @since 1.5 */ +@Deprecated(since="25", forRemoval=true) public class MBeanTrustPermission extends BasicPermission { private static final long serialVersionUID = -2952178077029018140L; diff --git a/src/java.management/share/classes/javax/management/remote/SubjectDelegationPermission.java b/src/java.management/share/classes/javax/management/remote/SubjectDelegationPermission.java index e2346e92bd5e5..9c5f4f05927ab 100644 --- a/src/java.management/share/classes/javax/management/remote/SubjectDelegationPermission.java +++ b/src/java.management/share/classes/javax/management/remote/SubjectDelegationPermission.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,9 +56,14 @@ * @apiNote * This permission cannot be used for controlling access to resources * as the Security Manager is no longer supported. + * Consequently this class is deprecated for removal in a future release. + * + * @deprecated This class was only useful in conjunction with the Security Manager, + * which is no longer supported. There is no replacement for this class. 
* * @since 1.5 */ +@Deprecated(since="25", forRemoval=true) public final class SubjectDelegationPermission extends BasicPermission { private static final long serialVersionUID = 1481618113008682343L; diff --git a/src/java.naming/share/classes/com/sun/jndi/ldap/DefaultResponseControlFactory.java b/src/java.naming/share/classes/com/sun/jndi/ldap/DefaultResponseControlFactory.java index 89b01484cd4f1..534aa95a91690 100644 --- a/src/java.naming/share/classes/com/sun/jndi/ldap/DefaultResponseControlFactory.java +++ b/src/java.naming/share/classes/com/sun/jndi/ldap/DefaultResponseControlFactory.java @@ -36,7 +36,7 @@ *

      *
    • * Paged results, as defined in - * RFC 2696. + * RFC 2696. *
    • * Server-side sorting, as defined in * RFC 2891. diff --git a/src/java.naming/share/classes/javax/naming/ldap/PagedResultsControl.java b/src/java.naming/share/classes/javax/naming/ldap/PagedResultsControl.java index ba3d42fb0a761..83391ba9c7416 100644 --- a/src/java.naming/share/classes/javax/naming/ldap/PagedResultsControl.java +++ b/src/java.naming/share/classes/javax/naming/ldap/PagedResultsControl.java @@ -92,7 +92,7 @@ * } *

      * This class implements the LDAPv3 Control for paged-results as defined in - * RFC 2696. + * RFC 2696. * * The control's value has the following ASN.1 definition: *

      {@code
      diff --git a/src/java.naming/share/classes/javax/naming/ldap/PagedResultsResponseControl.java b/src/java.naming/share/classes/javax/naming/ldap/PagedResultsResponseControl.java
      index d9d20f2bdedb2..798eea5a35541 100644
      --- a/src/java.naming/share/classes/javax/naming/ldap/PagedResultsResponseControl.java
      +++ b/src/java.naming/share/classes/javax/naming/ldap/PagedResultsResponseControl.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -40,7 +40,7 @@
        * 

      * This class implements the LDAPv3 Response Control for * paged-results as defined in - * RFC 2696. + * RFC 2696. * * The control's value has the following ASN.1 definition: *

      diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
      index f449fc317e1aa..b772959b811e0 100644
      --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
      +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Threads.java
      @@ -155,7 +155,11 @@ private static synchronized void initialize(TypeDataBase db) {
               virtualConstructor.addMapping("NotificationThread", NotificationThread.class);
               virtualConstructor.addMapping("StringDedupThread", StringDedupThread.class);
               virtualConstructor.addMapping("AttachListenerThread", AttachListenerThread.class);
      -        virtualConstructor.addMapping("DeoptimizeObjectsALotThread", DeoptimizeObjectsALotThread.class);
      +
      +        /* Only add DeoptimizeObjectsALotThread if it is actually present in the type database. */
      +        if (db.lookupType("DeoptimizeObjectsALotThread", false) != null) {
      +            virtualConstructor.addMapping("DeoptimizeObjectsALotThread", DeoptimizeObjectsALotThread.class);
      +        }
           }
       
           public Threads() {
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java
      index bbf9d148e6643..e382914168351 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ByteVector.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -4105,7 +4105,7 @@ static long booleanArrayAddress(boolean[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java
      index a217bec2dc7c6..cbb21667a154f 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/DoubleVector.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -3615,7 +3615,7 @@ static long arrayAddress(double[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java
      index 1b2ca02e247cb..78259e7698bbd 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/FloatVector.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -3565,7 +3565,7 @@ static long arrayAddress(float[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java
      index eee30600bac58..d68bba1d7e25b 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/IntVector.java
      @@ -3743,7 +3743,7 @@ static long arrayAddress(int[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java
      index 637e9c028748b..1fa1cafac4eda 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/LongVector.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -3678,7 +3678,7 @@ static long arrayAddress(long[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java
      index cf9031d613c08..19d7fa1d95e9e 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/ShortVector.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -4096,7 +4096,7 @@ static long charArrayAddress(char[] a, int index) {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template
      index c7598ac40a30d..05979330ab100 100644
      --- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template
      +++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/X-Vector.java.template
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -5348,7 +5348,7 @@ public abstract class $abstractvectortype$ extends AbstractVector<$Boxtype$> {
       
           @ForceInline
           static long byteArrayAddress(byte[] a, int index) {
      -        return (long) Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
      +        return Unsafe.ARRAY_BYTE_BASE_OFFSET + index;
           }
       
           // ================================================
      diff --git a/src/jdk.jdi/share/classes/com/sun/jdi/connect/spi/TransportService.java b/src/jdk.jdi/share/classes/com/sun/jdi/connect/spi/TransportService.java
      index c3889cb186ab2..6639797659ba3 100644
      --- a/src/jdk.jdi/share/classes/com/sun/jdi/connect/spi/TransportService.java
      +++ b/src/jdk.jdi/share/classes/com/sun/jdi/connect/spi/TransportService.java
      @@ -1,5 +1,5 @@
       /*
      - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
      + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
        * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
        *
        * This code is free software; you can redistribute it and/or modify it
      @@ -64,7 +64,7 @@
        * but may involve techniques such as the positive
        * acknowledgment with retransmission technique used in
        * protocols such as the Transmission Control Protocol (TCP)
      - * (see  RFC 793
      + * (see  RFC 793
        * ).
        *
        * 

      A transport service can be used to initiate a connection diff --git a/src/jdk.unsupported/share/classes/sun/misc/Unsafe.java b/src/jdk.unsupported/share/classes/sun/misc/Unsafe.java index 043af1fc9b73d..b0a27d368ff7c 100644 --- a/src/jdk.unsupported/share/classes/sun/misc/Unsafe.java +++ b/src/jdk.unsupported/share/classes/sun/misc/Unsafe.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -863,7 +863,7 @@ public void freeMemory(long address) { * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int INVALID_FIELD_OFFSET = jdk.internal.misc.Unsafe.INVALID_FIELD_OFFSET; + public static final int INVALID_FIELD_OFFSET = (int) jdk.internal.misc.Unsafe.INVALID_FIELD_OFFSET; /** * Reports the location of a given field in the storage allocation of its @@ -994,7 +994,7 @@ public Object staticFieldBase(Field f) { @ForceInline public int arrayBaseOffset(Class arrayClass) { beforeMemoryAccess(); - return theInternalUnsafe.arrayBaseOffset(arrayClass); + return (int) theInternalUnsafe.arrayBaseOffset(arrayClass); } /** The value of {@code arrayBaseOffset(boolean[].class)}. @@ -1002,63 +1002,63 @@ public int arrayBaseOffset(Class arrayClass) { * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_BOOLEAN_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_BOOLEAN_BASE_OFFSET; + public static final int ARRAY_BOOLEAN_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_BOOLEAN_BASE_OFFSET; /** The value of {@code arrayBaseOffset(byte[].class)}. 
* * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_BYTE_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET; + public static final int ARRAY_BYTE_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_BYTE_BASE_OFFSET; /** The value of {@code arrayBaseOffset(short[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_SHORT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_SHORT_BASE_OFFSET; + public static final int ARRAY_SHORT_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_SHORT_BASE_OFFSET; /** The value of {@code arrayBaseOffset(char[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_CHAR_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_CHAR_BASE_OFFSET; + public static final int ARRAY_CHAR_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_CHAR_BASE_OFFSET; /** The value of {@code arrayBaseOffset(int[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_INT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_INT_BASE_OFFSET; + public static final int ARRAY_INT_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_INT_BASE_OFFSET; /** The value of {@code arrayBaseOffset(long[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_LONG_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_LONG_BASE_OFFSET; + public static final int ARRAY_LONG_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_LONG_BASE_OFFSET; /** The value of {@code arrayBaseOffset(float[].class)}. 
* * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_FLOAT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_FLOAT_BASE_OFFSET; + public static final int ARRAY_FLOAT_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_FLOAT_BASE_OFFSET; /** The value of {@code arrayBaseOffset(double[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_DOUBLE_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_DOUBLE_BASE_OFFSET; + public static final int ARRAY_DOUBLE_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_DOUBLE_BASE_OFFSET; /** The value of {@code arrayBaseOffset(Object[].class)}. * * @deprecated Not needed when using {@link VarHandle} or {@link java.lang.foreign}. */ @Deprecated(since="23", forRemoval=true) - public static final int ARRAY_OBJECT_BASE_OFFSET = jdk.internal.misc.Unsafe.ARRAY_OBJECT_BASE_OFFSET; + public static final int ARRAY_OBJECT_BASE_OFFSET = (int) jdk.internal.misc.Unsafe.ARRAY_OBJECT_BASE_OFFSET; /** * Reports the scale factor for addressing elements in the storage diff --git a/test/docs/jdk/javadoc/doccheck/ExtLinksJdk.txt b/test/docs/jdk/javadoc/doccheck/ExtLinksJdk.txt index 704bdffb283c8..2e88186fe79f0 100644 --- a/test/docs/jdk/javadoc/doccheck/ExtLinksJdk.txt +++ b/test/docs/jdk/javadoc/doccheck/ExtLinksJdk.txt @@ -43,8 +43,8 @@ http://www.iana.org/assignments/character-sets/character-sets.xhtml http://www.iana.org/assignments/media-types/ http://www.iana.org/assignments/uri-schemes.html http://www.ietf.org/ -http://www.ietf.org/rfc/rfc0793.txt -http://www.ietf.org/rfc/rfc0822.txt +https://www.ietf.org/rfc/rfc793.txt +https://www.ietf.org/rfc/rfc822.txt http://www.ietf.org/rfc/rfc1122.txt http://www.ietf.org/rfc/rfc1123.txt http://www.ietf.org/rfc/rfc1323.txt @@ -83,8 +83,7 @@ http://www.ietf.org/rfc/rfc2440.txt 
http://www.ietf.org/rfc/rfc2474.txt http://www.ietf.org/rfc/rfc2609.txt http://www.ietf.org/rfc/rfc2616.txt -http://www.ietf.org/rfc/rfc2696 -http://www.ietf.org/rfc/rfc2696.txt +https://www.ietf.org/rfc/rfc2696.txt http://www.ietf.org/rfc/rfc2710.txt http://www.ietf.org/rfc/rfc2732.txt http://www.ietf.org/rfc/rfc2743.txt diff --git a/test/hotspot/gtest/nmt/test_nmt_nativecallstackstorage.cpp b/test/hotspot/gtest/nmt/test_nmt_nativecallstackstorage.cpp index 7ff18dc794ec9..b4b43c92cf18c 100644 --- a/test/hotspot/gtest/nmt/test_nmt_nativecallstackstorage.cpp +++ b/test/hotspot/gtest/nmt/test_nmt_nativecallstackstorage.cpp @@ -42,7 +42,7 @@ TEST_VM_F(NMTNativeCallStackStorageTest, DoNotStoreStackIfNotDetailed) { TEST_VM_F(NMTNativeCallStackStorageTest, CollisionsReceiveDifferentIndexes) { constexpr const int nr_of_stacks = 10; NativeCallStack ncs_arr[nr_of_stacks]; - for (int i = 0; i < nr_of_stacks; i++) { + for (size_t i = 0; i < nr_of_stacks; i++) { ncs_arr[i] = NativeCallStack((address*)(&i), 1); } @@ -52,7 +52,7 @@ TEST_VM_F(NMTNativeCallStackStorageTest, CollisionsReceiveDifferentIndexes) { si_arr[i] = ncss.push(ncs_arr[i]); } - // Every SI should be different as every sack is different + // Every SI should be different as every stack is different for (int i = 0; i < nr_of_stacks; i++) { for (int j = 0; j < nr_of_stacks; j++) { if (i == j) continue; diff --git a/test/hotspot/gtest/runtime/test_stubRoutines.cpp b/test/hotspot/gtest/runtime/test_stubRoutines.cpp index 02ab92245a6ca..6d718a8209f55 100644 --- a/test/hotspot/gtest/runtime/test_stubRoutines.cpp +++ b/test/hotspot/gtest/runtime/test_stubRoutines.cpp @@ -109,7 +109,7 @@ TEST_VM(StubRoutines, array_fill_routine) { MACOS_AARCH64_ONLY(os::current_thread_enable_wx(WXExec)); #define TEST_FILL(type) \ - if (StubRoutines::_##type##_fill != nullptr) { \ + if (StubRoutines::type##_fill() != nullptr) { \ union { \ double d; \ type body[96]; \ @@ -124,12 +124,12 @@ TEST_VM(StubRoutines, array_fill_routine) { for 
(int aligned = 0; aligned < 2; aligned++) { \ if (aligned) { \ if (((intptr_t)start) % HeapWordSize == 0) { \ - ((void (*)(type*, int, int))StubRoutines::_arrayof_##type##_fill)(start, v, 80); \ + ((void (*)(type*, int, int))StubRoutines::arrayof_##type##_fill())(start, v, 80); \ } else { \ continue; \ } \ } else { \ - ((void (*)(type*, int, int))StubRoutines::_##type##_fill)(start, v, 80); \ + ((void (*)(type*, int, int))StubRoutines::type##_fill())(start, v, 80); \ } \ for (int i = 0; i < 96; i++) { \ if (i < (8 + offset) || i >= (88 + offset)) { \ diff --git a/test/hotspot/gtest/utilities/test_rbtree.cpp b/test/hotspot/gtest/utilities/test_rbtree.cpp new file mode 100644 index 0000000000000..c1be34b08d64c --- /dev/null +++ b/test/hotspot/gtest/utilities/test_rbtree.cpp @@ -0,0 +1,573 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "memory/resourceArea.hpp" +#include "runtime/os.hpp" +#include "testutils.hpp" +#include "unittest.hpp" +#include "utilities/growableArray.hpp" +#include "utilities/rbTree.hpp" +#include "utilities/rbTree.inline.hpp" + + +class RBTreeTest : public testing::Test { +public: + struct Cmp { + static int cmp(int a, int b) { + return a - b; + } + }; + + struct CmpInverse { + static int cmp(int a, int b) { + return b - a; + } + }; + + struct FCmp { + static int cmp(float a, float b) { + if (a < b) return -1; + if (a == b) return 0; + return 1; + } + }; + +// Bump-pointer style allocator that can't free +template +struct ArrayAllocator { + uint8_t area[AreaSize]; + size_t offset = 0; + + void* allocate(size_t sz) { + if (offset + sz > AreaSize) { + vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, + "red-black tree failed allocation"); + } + void* place = &area[offset]; + offset += sz; + return place; + } + + void free(void* ptr) { } +}; + +#ifdef ASSERT + template + void verify_it(RBTree& t) { + t.verify_self(); + } +#endif // ASSERT + +using RBTreeInt = RBTreeCHeap; + +public: + void inserting_duplicates_results_in_one_value() { + constexpr int up_to = 10; + GrowableArrayCHeap nums_seen(up_to, up_to, 0); + RBTreeInt rbtree; + + for (int i = 0; i < up_to; i++) { + rbtree.upsert(i, i); + rbtree.upsert(i, i); + rbtree.upsert(i, i); + rbtree.upsert(i, i); + rbtree.upsert(i, i); + } + + rbtree.visit_in_order([&](RBTreeInt::RBNode* node) { + nums_seen.at(node->key())++; + }); + for (int i = 0; i < up_to; i++) { + EXPECT_EQ(1, nums_seen.at(i)); + } + } + + void rbtree_ought_not_leak() { + struct LeakCheckedAllocator { + int allocations; + + LeakCheckedAllocator() + : allocations(0) { + } + + void* allocate(size_t sz) { + void* allocation = os::malloc(sz, mtTest); + if (allocation == nullptr) { + vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "rbtree failed allocation"); + } + ++allocations; + return allocation; + } + + void free(void* ptr) { + --allocations; + 
os::free(ptr); + } + }; + + constexpr int up_to = 10; + { + RBTree rbtree; + for (int i = 0; i < up_to; i++) { + rbtree.upsert(i, i); + } + EXPECT_EQ(up_to, rbtree._allocator.allocations); + for (int i = 0; i < up_to; i++) { + rbtree.remove(i); + } + EXPECT_EQ(0, rbtree._allocator.allocations); + EXPECT_EQ(nullptr, rbtree._root); + } + + { + RBTree rbtree; + for (int i = 0; i < up_to; i++) { + rbtree.upsert(i, i); + } + rbtree.remove_all(); + EXPECT_EQ(0, rbtree._allocator.allocations); + EXPECT_EQ(nullptr, rbtree._root); + } + } + + void test_find() { + struct Empty {}; + RBTreeCHeap rbtree; + using Node = RBTreeCHeap::RBNode; + + Node* n = nullptr; + auto test = [&](float f) { + EXPECT_EQ(nullptr, rbtree.find(f)); + rbtree.upsert(f, Empty{}); + const Node* n = rbtree.find_node(f); + EXPECT_NE(nullptr, n); + EXPECT_EQ(f, n->key()); + }; + + test(1.0f); + test(5.0f); + test(0.0f); + } + + void test_visitors() { + { // Tests with 'default' ordering (ascending) + RBTreeInt rbtree; + using Node = RBTreeInt::RBNode; + + rbtree.visit_range_in_order(0, 100, [&](Node* x) { + EXPECT_TRUE(false) << "Empty rbtree has no nodes to visit"; + }); + + // Single-element set + rbtree.upsert(1, 0); + int count = 0; + rbtree.visit_range_in_order(0, 100, [&](Node* x) { + count++; + }); + EXPECT_EQ(1, count); + + count = 0; + rbtree.visit_in_order([&](Node* x) { + count++; + }); + EXPECT_EQ(1, count); + + // Add an element outside of the range that should not be visited on the right side and + // one on the left side. + rbtree.upsert(101, 0); + rbtree.upsert(-1, 0); + count = 0; + rbtree.visit_range_in_order(0, 100, [&](Node* x) { + count++; + }); + EXPECT_EQ(1, count); + + count = 0; + rbtree.visit_in_order([&](Node* x) { + count++; + }); + EXPECT_EQ(3, count); + + // Visiting empty range [0, 0) == {} + rbtree.upsert(0, 0); // This node should not be visited. 
+ rbtree.visit_range_in_order(0, 0, [&](Node* x) { + EXPECT_TRUE(false) << "Empty visiting range should not visit any node"; + }); + + rbtree.remove_all(); + for (int i = 0; i < 11; i++) { + rbtree.upsert(i, 0); + } + + ResourceMark rm; + GrowableArray seen; + rbtree.visit_range_in_order(0, 10, [&](Node* x) { + seen.push(x->key()); + }); + EXPECT_EQ(10, seen.length()); + for (int i = 0; i < 10; i++) { + EXPECT_EQ(i, seen.at(i)); + } + + seen.clear(); + rbtree.visit_in_order([&](Node* x) { + seen.push(x->key()); + }); + EXPECT_EQ(11, seen.length()); + for (int i = 0; i < 10; i++) { + EXPECT_EQ(i, seen.at(i)); + } + + seen.clear(); + rbtree.visit_range_in_order(10, 12, [&](Node* x) { + seen.push(x->key()); + }); + EXPECT_EQ(1, seen.length()); + EXPECT_EQ(10, seen.at(0)); + } + { // Test with descending ordering + RBTreeCHeap rbtree; + using Node = RBTreeCHeap::RBNode; + + for (int i = 0; i < 10; i++) { + rbtree.upsert(i, 0); + } + ResourceMark rm; + GrowableArray seen; + rbtree.visit_range_in_order(9, -1, [&](Node* x) { + seen.push(x->key()); + }); + EXPECT_EQ(10, seen.length()); + for (int i = 0; i < 10; i++) { + EXPECT_EQ(10-i-1, seen.at(i)); + } + seen.clear(); + + rbtree.visit_in_order([&](Node* x) { + seen.push(x->key()); + }); + EXPECT_EQ(10, seen.length()); + for (int i = 0; i < 10; i++) { + EXPECT_EQ(10 - i - 1, seen.at(i)); + } + } + } + + void test_closest_leq() { + using Node = RBTreeInt::RBNode; + { + RBTreeInt rbtree; + Node* n = rbtree.closest_leq(0); + EXPECT_EQ(nullptr, n); + + rbtree.upsert(0, 0); + n = rbtree.closest_leq(0); + EXPECT_EQ(0, n->key()); + + rbtree.upsert(-1, -1); + n = rbtree.closest_leq(0); + EXPECT_EQ(0, n->key()); + + rbtree.upsert(6, 0); + n = rbtree.closest_leq(6); + EXPECT_EQ(6, n->key()); + + n = rbtree.closest_leq(-2); + EXPECT_EQ(nullptr, n); + } + } + + void test_node_prev() { + RBTreeInt _tree; + using Node = RBTreeInt::RBNode; + constexpr int num_nodes = 100; + + for (int i = num_nodes; i > 0; i--) { + _tree.upsert(i, i); + 
} + + Node* node = _tree.find_node(num_nodes); + int count = num_nodes; + while (node != nullptr) { + EXPECT_EQ(count, node->val()); + node = node->prev(); + count--; + } + + EXPECT_EQ(count, 0); + } + + void test_node_next() { + RBTreeInt _tree; + using Node = RBTreeInt::RBNode; + constexpr int num_nodes = 100; + + for (int i = 0; i < num_nodes; i++) { + _tree.upsert(i, i); + } + + Node* node = _tree.find_node(0); + int count = 0; + while (node != nullptr) { + EXPECT_EQ(count, node->val()); + node = node->next(); + count++; + } + + EXPECT_EQ(count, num_nodes); + } + + void test_stable_nodes() { + using Node = RBTreeInt::RBNode; + RBTreeInt rbtree; + ResourceMark rm; + GrowableArray a(10000); + for (int i = 0; i < 10000; i++) { + rbtree.upsert(i, i); + a.push(rbtree.find_node(i)); + } + + for (int i = 0; i < 2000; i++) { + int r = os::random() % 10000; + Node* to_delete = rbtree.find_node(r); + if (to_delete != nullptr && to_delete->_left != nullptr && + to_delete->_right != nullptr) { + rbtree.remove(to_delete); + } + } + + // After deleting, nodes should have been moved around but kept their values + for (int i = 0; i < 10000; i++) { + const Node* n = rbtree.find_node(i); + if (n != nullptr) { + EXPECT_EQ(a.at(i), n); + } + } + } + + void test_stable_nodes_addresses() { + using Tree = RBTreeCHeap; + using Node = Tree::RBNode; + Tree rbtree; + for (int i = 0; i < 10000; i++) { + rbtree.upsert(i, nullptr); + Node* inserted_node = rbtree.find_node(i); + inserted_node->val() = inserted_node; + } + + for (int i = 0; i < 2000; i++) { + int r = os::random() % 10000; + Node* to_delete = rbtree.find_node(r); + if (to_delete != nullptr && to_delete->_left != nullptr && + to_delete->_right != nullptr) { + rbtree.remove(to_delete); + } + } + + // After deleting, values should have remained consistant + rbtree.visit_in_order([&](Node* node) { + EXPECT_EQ(node, node->val()); + }); + } + +#ifdef ASSERT + void test_fill_verify() { + RBTreeInt rbtree; + + ResourceMark rm; + 
GrowableArray allocations; + + int size = 10000; + // Create random values + for (int i = 0; i < size; i++) { + int r = os::random() % size; + allocations.append(r); + } + + // Insert ~half of the values + for (int i = 0; i < size; i++) { + int r = os::random(); + if (r % 2 == 0) { + rbtree.upsert(allocations.at(i), allocations.at(i)); + } + if (i % 100 == 0) { + verify_it(rbtree); + } + } + + // Insert and remove randomly + for (int i = 0; i < size; i++) { + int r = os::random(); + if (r % 2 == 0) { + rbtree.upsert(allocations.at(i), allocations.at(i)); + } else { + rbtree.remove(allocations.at(i)); + } + if (i % 100 == 0) { + verify_it(rbtree); + } + } + + // Remove all elements + for (int i = 0; i < size; i++) { + rbtree.remove(allocations.at(i)); + } + + verify_it(rbtree); + EXPECT_EQ(rbtree.size(), 0UL); + } + + void test_nodes_visited_once() { + constexpr size_t memory_size = 65536; + using Tree = RBTree>; + using Node = Tree::RBNode; + + Tree tree; + + int num_nodes = memory_size / sizeof(Node); + for (int i = 0; i < num_nodes; i++) { + tree.upsert(i, i); + } + + Node* start = tree.find_node(0); + + Node* node = start; + for (int i = 0; i < num_nodes; i++) { + EXPECT_EQ(tree._expected_visited, node->_visited); + node += 1; + } + + verify_it(tree); + + node = start; + for (int i = 0; i < num_nodes; i++) { + EXPECT_EQ(tree._expected_visited, node->_visited); + node += 1; + } + + } +#endif // ASSERT + +}; + +TEST_VM_F(RBTreeTest, InsertingDuplicatesResultsInOneValue) { + this->inserting_duplicates_results_in_one_value(); +} + +TEST_VM_F(RBTreeTest, RBTreeOughtNotLeak) { + this->rbtree_ought_not_leak(); +} + +TEST_VM_F(RBTreeTest, TestFind) { + this->test_find(); +} + +TEST_VM_F(RBTreeTest, TestVisitors) { + this->test_visitors(); +} + +TEST_VM_F(RBTreeTest, TestClosestLeq) { + this->test_closest_leq(); +} + +TEST_VM_F(RBTreeTest, NodePrev) { + this->test_node_prev(); +} + +TEST_VM_F(RBTreeTest, NodeNext) { + this->test_node_next(); +} + +TEST_VM_F(RBTreeTest, 
NodeStableTest) { + this->test_stable_nodes(); +} + +TEST_VM_F(RBTreeTest, NodeStableAddressTest) { + this->test_stable_nodes_addresses(); +} + +#ifdef ASSERT +TEST_VM_F(RBTreeTest, FillAndVerify) { + this->test_fill_verify(); +} + +TEST_VM_F(RBTreeTest, NodesVisitedOnce) { + this->test_nodes_visited_once(); +} + +TEST_VM_F(RBTreeTest, InsertRemoveVerify) { + constexpr int num_nodes = 100; + for (int n_t1 = 0; n_t1 < num_nodes; n_t1++) { + for (int n_t2 = 0; n_t2 < n_t1; n_t2++) { + RBTreeInt tree; + for (int i = 0; i < n_t1; i++) { + tree.upsert(i, i); + } + for (int i = 0; i < n_t2; i++) { + tree.remove(i); + } + verify_it(tree); + } + } +} + +TEST_VM_F(RBTreeTest, VerifyItThroughStressTest) { + { // Repeatedly verify a tree of moderate size + RBTreeInt rbtree; + constexpr int ten_thousand = 10000; + for (int i = 0; i < ten_thousand; i++) { + int r = os::random(); + if (r % 2 == 0) { + rbtree.upsert(i, i); + } else { + rbtree.remove(i); + } + if (i % 100 == 0) { + verify_it(rbtree); + } + } + for (int i = 0; i < ten_thousand; i++) { + int r = os::random(); + if (r % 2 == 0) { + rbtree.upsert(i, i); + } else { + rbtree.remove(i); + } + if (i % 100 == 0) { + verify_it(rbtree); + } + } + } + { // Make a very large tree and verify at the end + struct Nothing {}; + RBTreeCHeap rbtree; + constexpr int one_hundred_thousand = 100000; + for (int i = 0; i < one_hundred_thousand; i++) { + rbtree.upsert(i, Nothing()); + } + verify_it(rbtree); + } +} + +#endif // ASSERT diff --git a/test/hotspot/jtreg/ProblemList-Virtual.txt b/test/hotspot/jtreg/ProblemList-Virtual.txt index 64b281ccc980e..2239a10f3e776 100644 --- a/test/hotspot/jtreg/ProblemList-Virtual.txt +++ b/test/hotspot/jtreg/ProblemList-Virtual.txt @@ -37,17 +37,8 @@ vmTestbase/nsk/jvmti/CompiledMethodUnload/compmethunload001/TestDescription.java #### ## Tests for functionality which currently is not supported for virtual threads -vmTestbase/nsk/jvmti/GetCurrentThreadCpuTime/curthrcputime001/TestDescription.java 
8300708 generic-all -vmTestbase/nsk/jvmti/GetThreadCpuTime/thrcputime001/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/NotifyFramePop/nframepop002/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/NotifyFramePop/nframepop003/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/StopThread/stopthrd006/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/scenarios/events/EM02/em02t012/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/SetLocalVariable/setlocal004/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/SetLocalVariable/setlocal003/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/SetLocalVariable/setlocal002/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001/TestDescription.java 8300708 generic-all -vmTestbase/nsk/jvmti/unit/GetLocalVariable/getlocal003/TestDescription.java 8300708 generic-all +vmTestbase/nsk/jvmti/GetCurrentThreadCpuTime/curthrcputime001/TestDescription.java 8348844 generic-all +vmTestbase/nsk/jvmti/GetThreadCpuTime/thrcputime001/TestDescription.java 8348844 generic-all #### ## Test fails because it expects to find vthreads in GetAllThreads diff --git a/test/hotspot/jtreg/TEST.groups b/test/hotspot/jtreg/TEST.groups index 99a34c1ef836b..c3bc2351ac85a 100644 --- a/test/hotspot/jtreg/TEST.groups +++ b/test/hotspot/jtreg/TEST.groups @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -518,14 +518,21 @@ hotspot_aot_classlinking = \ -runtime/cds/appcds/cacheObject/ArchivedIntegerCacheTest.java \ -runtime/cds/appcds/cacheObject/ArchivedModuleCompareTest.java \ -runtime/cds/appcds/CDSandJFR.java \ + -runtime/cds/appcds/customLoader/CustomClassListDump.java \ -runtime/cds/appcds/customLoader/HelloCustom_JFR.java \ + -runtime/cds/appcds/customLoader/OldClassAndInf.java \ -runtime/cds/appcds/customLoader/ParallelTestMultiFP.java \ -runtime/cds/appcds/customLoader/ParallelTestSingleFP.java \ -runtime/cds/appcds/customLoader/SameNameInTwoLoadersTest.java \ -runtime/cds/appcds/DumpClassListWithLF.java \ -runtime/cds/appcds/dynamicArchive/ModulePath.java \ + -runtime/cds/appcds/dynamicArchive/LambdaCustomLoader.java \ + -runtime/cds/appcds/dynamicArchive/LambdaForOldInfInBaseArchive.java \ -runtime/cds/appcds/dynamicArchive/LambdaInBaseArchive.java \ -runtime/cds/appcds/dynamicArchive/LambdasInTwoArchives.java \ + -runtime/cds/appcds/dynamicArchive/OldClassAndInf.java \ + -runtime/cds/appcds/dynamicArchive/OldClassInBaseArchive.java \ + -runtime/cds/appcds/dynamicArchive/OldClassVerifierTrouble.java \ -runtime/cds/appcds/HelloExtTest.java \ -runtime/cds/appcds/javaldr/GCDuringDump.java \ -runtime/cds/appcds/javaldr/LockDuringDump.java \ @@ -545,6 +552,13 @@ hotspot_aot_classlinking = \ -runtime/cds/appcds/jvmti \ -runtime/cds/appcds/LambdaProxyClasslist.java \ -runtime/cds/appcds/loaderConstraints/LoaderConstraintsTest.java \ + -runtime/cds/appcds/NestHostOldInf.java \ + -runtime/cds/appcds/OldClassTest.java \ + -runtime/cds/appcds/OldClassWithjsr.java \ + -runtime/cds/appcds/OldInfExtendsInfDefMeth.java \ + -runtime/cds/appcds/OldSuperClass.java \ + -runtime/cds/appcds/OldSuperInfIndirect.java \ + -runtime/cds/appcds/OldSuperInf.java \ -runtime/cds/appcds/redefineClass \ -runtime/cds/appcds/resolvedConstants/AOTLinkedLambdas.java \ 
-runtime/cds/appcds/resolvedConstants/AOTLinkedVarHandles.java \ diff --git a/test/hotspot/jtreg/compiler/c2/Test6968348.java b/test/hotspot/jtreg/compiler/c2/Test6968348.java index 17a2d61716b0a..488bcfcac589f 100644 --- a/test/hotspot/jtreg/compiler/c2/Test6968348.java +++ b/test/hotspot/jtreg/compiler/c2/Test6968348.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ public class Test6968348 { static Unsafe unsafe = Unsafe.getUnsafe(); static final long[] buffer = new long[4096]; - static int array_long_base_offset; + static long array_long_base_offset; public static void main(String[] args) throws Exception { array_long_base_offset = unsafe.arrayBaseOffset(long[].class); diff --git a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeCAS.java b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeCAS.java index e0c43b055295b..64c52446c3681 100644 --- a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeCAS.java +++ b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeCAS.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,7 @@ public class TestIntUnsafeCAS { private static final int UNALIGN_OFF = 5; private static final Unsafe unsafe = Unsafe.getUnsafe(); - private static final int BASE; + private static final long BASE; static { try { BASE = unsafe.arrayBaseOffset(int[].class); diff --git a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeOrdered.java b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeOrdered.java index b7354c88dae52..342b146a0c893 100644 --- a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeOrdered.java +++ b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeOrdered.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,7 @@ public class TestIntUnsafeOrdered { private static final int UNALIGN_OFF = 5; private static final Unsafe unsafe = Unsafe.getUnsafe(); - private static final int BASE; + private static final long BASE; static { try { BASE = unsafe.arrayBaseOffset(int[].class); diff --git a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeVolatile.java b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeVolatile.java index 3ff0a1323798e..3ea812a9e79ff 100644 --- a/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeVolatile.java +++ b/test/hotspot/jtreg/compiler/c2/cr8004867/TestIntUnsafeVolatile.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,7 @@ public class TestIntUnsafeVolatile { private static final int UNALIGN_OFF = 5; private static final Unsafe unsafe = Unsafe.getUnsafe(); - private static final int BASE; + private static final long BASE; static { try { BASE = unsafe.arrayBaseOffset(int[].class); diff --git a/test/hotspot/jtreg/compiler/ciReplay/InliningBase.java b/test/hotspot/jtreg/compiler/ciReplay/InliningBase.java index e3ec752712303..fbe2e89a45d56 100644 --- a/test/hotspot/jtreg/compiler/ciReplay/InliningBase.java +++ b/test/hotspot/jtreg/compiler/ciReplay/InliningBase.java @@ -86,31 +86,31 @@ public InlineEntry(String klass, String method, String reason) { } public boolean isNormalInline() { - return reason.equals("inline (hot)"); + return reason.startsWith("inline (hot)"); } public boolean isForcedByReplay() { - return reason.equals("force inline by ciReplay"); + return reason.startsWith("force inline by ciReplay"); } public boolean isDisallowedByReplay() { - return reason.equals("failed to inline: disallowed by ciReplay"); + return reason.startsWith("failed to inline: disallowed by ciReplay"); } public boolean isUnloadedSignatureClasses() { - return reason.equals("failed to inline: unloaded signature classes"); + return reason.startsWith("failed to inline: unloaded signature classes"); } public boolean isForcedIncrementalInlineByReplay() { - return reason.equals("force (incremental) inline by ciReplay"); + return reason.startsWith("force (incremental) inline by ciReplay"); } public boolean isForcedInline() { - return reason.equals("force inline by annotation"); + return reason.startsWith("force inline by annotation"); } public boolean isTooDeep() { - return reason.equals("failed to inline: inlining too deep"); + return reason.startsWith("failed to inline: inlining too deep"); } @Override diff --git a/test/hotspot/jtreg/compiler/inlining/LateInlinePrinting.java 
b/test/hotspot/jtreg/compiler/inlining/LateInlinePrinting.java new file mode 100644 index 0000000000000..09ed466f2998d --- /dev/null +++ b/test/hotspot/jtreg/compiler/inlining/LateInlinePrinting.java @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + + +/** + * @test + * @bug 8319850 + * @summary PrintInlining should print which methods are late inlines + * @modules java.base/jdk.internal.misc + * @library /test/lib + * @requires vm.flagless + * + * @run driver compiler.inlining.LateInlinePrinting + */ + +package compiler.inlining; + +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; + +public class LateInlinePrinting { + public static class TestLateInlining { + public static void main(String[] args) { + for (int i = 0; i < 20_000; i++) { + test1(); + test2(); + } + } + + private static void test1() { + test3(); + testFailInline(); + testFailInline(); + test2(); + } + + private static void test2() { + inlined1(); + inlined2(); + } + + private static void test3() {} + + private static void testFailInline() {} + + private static void inlined1() {} + + private static void inlined2() {} + } + + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-XX:-TieredCompilation", "-XX:-UseOnStackReplacement", "-XX:-BackgroundCompilation", + "-XX:+PrintCompilation", + "-XX:CompileCommand=compileonly,compiler.inlining.LateInlinePrinting$TestLateInlining::test1", + "-XX:CompileCommand=compileonly,compiler.inlining.LateInlinePrinting$TestLateInlining::test2", + "-XX:CompileCommand=quiet", "-XX:+PrintInlining", "-XX:+AlwaysIncrementalInline", + "-XX:CompileCommand=dontinline,compiler.inlining.LateInlinePrinting$TestLateInlining::testFailInline", + TestLateInlining.class.getName() + ); + + OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); + analyzer.shouldHaveExitValue(0); + + analyzer.shouldContain(""" +compiler.inlining.LateInlinePrinting$TestLateInlining::test2 (7 bytes) + @ 0 compiler.inlining.LateInlinePrinting$TestLateInlining::inlined1 (1 bytes) inline (hot) late inline succeeded + @ 3 compiler.inlining.LateInlinePrinting$TestLateInlining::inlined2 (1 bytes) inline (hot) late inline 
succeeded + """); + analyzer.shouldContain(""" +compiler.inlining.LateInlinePrinting$TestLateInlining::test1 (13 bytes) + @ 0 compiler.inlining.LateInlinePrinting$TestLateInlining::test3 (1 bytes) inline (hot) late inline succeeded + @ 3 compiler.inlining.LateInlinePrinting$TestLateInlining::testFailInline (1 bytes) failed to inline: disallowed by CompileCommand + @ 6 compiler.inlining.LateInlinePrinting$TestLateInlining::testFailInline (1 bytes) failed to inline: disallowed by CompileCommand + @ 9 compiler.inlining.LateInlinePrinting$TestLateInlining::test2 (7 bytes) inline (hot) late inline succeeded + @ 0 compiler.inlining.LateInlinePrinting$TestLateInlining::inlined1 (1 bytes) inline (hot) late inline succeeded + @ 3 compiler.inlining.LateInlinePrinting$TestLateInlining::inlined2 (1 bytes) inline (hot) late inline succeeded + """); + } +} diff --git a/test/hotspot/jtreg/compiler/inlining/TestDuplicatedLateInliningOutput.java b/test/hotspot/jtreg/compiler/inlining/TestDuplicatedLateInliningOutput.java index 2b967968ea268..ebc5a827ea4c0 100644 --- a/test/hotspot/jtreg/compiler/inlining/TestDuplicatedLateInliningOutput.java +++ b/test/hotspot/jtreg/compiler/inlining/TestDuplicatedLateInliningOutput.java @@ -45,12 +45,12 @@ public class TestDuplicatedLateInliningOutput { public static void main(String[] args) throws Exception { test( NonConstantReceiverLauncher.class, - "@ (\\d+)\\s+java\\.lang\\.invoke\\.LambdaForm\\$DMH\\/0x[0-9a-f]+::invokeStatic \\(\\d+ bytes\\)\\s+force inline by annotation", + "@ (\\d+)\\s+java\\.lang\\.invoke\\.MethodHandle::invokeBasic\\(\\)V \\(\\d+ bytes\\)\\s+failed to inline: receiver not constant\\s+callee changed to\\s+java\\.lang\\.invoke\\.LambdaForm\\$DMH\\/0x[0-9a-f]+::invokeStatic \\(\\d+ bytes\\)\\s+force inline by annotation\\s+late inline succeeded \\(method handle\\)", "@ (\\d+)\\s+java\\.lang\\.invoke\\.MethodHandle::invokeBasic\\(\\)V \\(\\d+ bytes\\)\\s+failed to inline: receiver not constant"); test( 
VirtualCallLauncher.class, - "@ (\\d+)\\s+compiler\\.inlining\\.TestDuplicatedLateInliningOutput\\$VirtualCallLauncher\\$B::lateInlined2 \\(\\d+ bytes\\)\\s+inline \\(hot\\)", + "@ (\\d+)\\s+compiler\\.inlining\\.TestDuplicatedLateInliningOutput\\$VirtualCallLauncher\\$A::lateInlined2 \\(\\d+ bytes\\)\\s+failed to inline: virtual call\\s+callee changed to\\s+\\s+compiler\\.inlining\\.TestDuplicatedLateInliningOutput\\$VirtualCallLauncher\\$B::lateInlined2 \\(\\d+ bytes\\)\\s+inline \\(hot\\)\\s+late inline succeeded", "@ (\\d+)\\s+compiler\\.inlining\\.TestDuplicatedLateInliningOutput\\$VirtualCallLauncher\\$A::lateInlined2 \\(\\d+ bytes\\)\\s+failed to inline: virtual call" ); } @@ -75,7 +75,7 @@ private static void test(Class launcher, String pattern1, String pattern2) th int index = IntStream.range(0, lines.size()) .filter(i -> lines.get(i).trim().matches(pattern1)) .findFirst() - .orElseThrow(() -> new Exception("No inlining found")); + .orElseThrow(() -> new Exception("No inlining found" + pattern1)); if (lines.get(index - 1).trim().matches(pattern2)) { throw new Exception("Both failure and success message found"); diff --git a/test/hotspot/jtreg/compiler/intrinsics/TestArrayGuardWithInterfaces.java b/test/hotspot/jtreg/compiler/intrinsics/TestArrayGuardWithInterfaces.java new file mode 100644 index 0000000000000..b9c26222c1604 --- /dev/null +++ b/test/hotspot/jtreg/compiler/intrinsics/TestArrayGuardWithInterfaces.java @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.lang.reflect.Array; +import jdk.test.lib.Asserts; + +/** + * @test + * @bug 8348631 + * @summary Test folding of array guards used by intrinsics. + * @library /test/lib + * @run main/othervm -Xcomp -XX:-TieredCompilation + * -XX:CompileCommand=compileonly,TestArrayGuardWithInterfaces::test* + * TestArrayGuardWithInterfaces + */ +public class TestArrayGuardWithInterfaces { + + public static interface MyInterface { } + + public static int test1(Object obj) { + // Should be folded, arrays can never implement 'MyInterface' + return Array.getLength((MyInterface)obj); + } + + public static int test2(Object obj) { + // Should not be folded, arrays implement 'Cloneable' + return Array.getLength((Cloneable)obj); + } + + public static void main(String[] args) { + // Warmup + Class c = MyInterface.class; + Array.getLength(args); + + try { + test1(null); + throw new RuntimeException("No exception thrown"); + } catch (Exception e) { + // Expected + } + Asserts.assertEQ(test2(new int[1]), 1); + } +} diff --git a/test/hotspot/jtreg/compiler/intrinsics/TestContinuationPinningAndEA.java b/test/hotspot/jtreg/compiler/intrinsics/TestContinuationPinningAndEA.java new file mode 100644 index 0000000000000..e2ce8312eb3cf --- /dev/null +++ b/test/hotspot/jtreg/compiler/intrinsics/TestContinuationPinningAndEA.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 8347997 + * @summary Test that Continuation.pin() and unpin() intrinsics work with EA. 
+ * @modules java.base/jdk.internal.vm + * @run main TestContinuationPinningAndEA + */ + +import jdk.internal.vm.Continuation; + +public class TestContinuationPinningAndEA { + + static class FailsEA { + final Object o; + + public FailsEA() throws Throwable { + o = new Object(); + Continuation.pin(); + Continuation.unpin(); + } + } + + static class Crashes { + final Object o; + + public Crashes() throws Throwable { + Continuation.pin(); + Continuation.unpin(); + o = new Object(); + } + } + + static void test_FailsEA() throws Throwable { + for (int i = 0; i < 10_000; ++i) { + new FailsEA(); + } + } + + static void test_Crashes() throws Throwable { + for (int i = 0; i < 10_000; ++i) { + new Crashes(); + } + } + + public static void main(String[] args) throws Throwable { + int iterations = 100; + for (int i = 0; i < iterations; ++i) { + test_FailsEA(); + } + for (int i = 0; i < iterations; ++i) { + test_Crashes(); + } + } +} diff --git a/test/hotspot/jtreg/compiler/jsr292/CallSiteDepContextTest.java b/test/hotspot/jtreg/compiler/jsr292/CallSiteDepContextTest.java index 3063c4e0f8752..2c740fa47dfb0 100644 --- a/test/hotspot/jtreg/compiler/jsr292/CallSiteDepContextTest.java +++ b/test/hotspot/jtreg/compiler/jsr292/CallSiteDepContextTest.java @@ -123,8 +123,8 @@ private static void execute(int expected, MethodHandle... 
mhs) throws Throwable public static void testHiddenDepField() { try { - Field f = MethodHandleHelper.MHN_CALL_SITE_CONTEXT_CLASS.getDeclaredField("vmdependencies"); - throw new AssertionError("Context.dependencies field should be hidden"); + Field f = MethodHandleHelper.JLI_CALL_SITE_CLASS.getDeclaredField("vmdependencies"); + throw new AssertionError("CallSite.dependencies field should be hidden"); } catch(NoSuchFieldException e) { /* expected */ } } diff --git a/test/hotspot/jtreg/compiler/jsr292/patches/java.base/java/lang/invoke/MethodHandleHelper.java b/test/hotspot/jtreg/compiler/jsr292/patches/java.base/java/lang/invoke/MethodHandleHelper.java index 614dc80799e2e..7e35d0c4e3d10 100644 --- a/test/hotspot/jtreg/compiler/jsr292/patches/java.base/java/lang/invoke/MethodHandleHelper.java +++ b/test/hotspot/jtreg/compiler/jsr292/patches/java.base/java/lang/invoke/MethodHandleHelper.java @@ -36,8 +36,8 @@ public class MethodHandleHelper { private MethodHandleHelper() { } public static final Lookup IMPL_LOOKUP = Lookup.IMPL_LOOKUP; - public static final Class MHN_CALL_SITE_CONTEXT_CLASS - = MethodHandleNatives.CallSiteContext.class; + public static final Class JLI_CALL_SITE_CLASS + = java.lang.invoke.CallSite.class; public static void customize(MethodHandle mh) { mh.customize(); diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestMovingLoadBeforeStore.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestMovingLoadBeforeStore.java index 45e3af0774193..72ca2cee3414b 100644 --- a/test/hotspot/jtreg/compiler/loopopts/superword/TestMovingLoadBeforeStore.java +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestMovingLoadBeforeStore.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -156,7 +156,7 @@ static void ref2(byte[] a, byte[] b) { static void test3(byte[] a) { for (int i = 51; i < 6000; i++) { - int adr = UNSAFE.ARRAY_BYTE_BASE_OFFSET + 42 + i; + long adr = UNSAFE.ARRAY_BYTE_BASE_OFFSET + 42 + i; UNSAFE.putIntUnaligned(a, adr + 0*4, UNSAFE.getIntUnaligned(a, adr + 0*4) + 1); UNSAFE.putIntUnaligned(a, adr + 1*4, UNSAFE.getIntUnaligned(a, adr + 1*4) + 1); UNSAFE.putIntUnaligned(a, adr + 2*4, UNSAFE.getIntUnaligned(a, adr + 2*4) + 1); @@ -171,7 +171,7 @@ static void test3(byte[] a) { static void ref3(byte[] a) { for (int i = 51; i < 6000; i++) { - int adr = UNSAFE.ARRAY_BYTE_BASE_OFFSET + 42 + i; + long adr = UNSAFE.ARRAY_BYTE_BASE_OFFSET + 42 + i; UNSAFE.putIntUnaligned(a, adr + 0*4, UNSAFE.getIntUnaligned(a, adr + 0*4) + 1); UNSAFE.putIntUnaligned(a, adr + 1*4, UNSAFE.getIntUnaligned(a, adr + 1*4) + 1); UNSAFE.putIntUnaligned(a, adr + 2*4, UNSAFE.getIntUnaligned(a, adr + 2*4) + 1); diff --git a/test/hotspot/jtreg/compiler/runtime/Test8010927.java b/test/hotspot/jtreg/compiler/runtime/Test8010927.java index 74aa824f2da02..3e90dc0c67cb8 100644 --- a/test/hotspot/jtreg/compiler/runtime/Test8010927.java +++ b/test/hotspot/jtreg/compiler/runtime/Test8010927.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ public class Test8010927 { static final Test8010927 elem = new Test8010927(); static final WhiteBox wb = WhiteBox.getWhiteBox(); - static final int obj_header_size = U.ARRAY_OBJECT_BASE_OFFSET; + static final int obj_header_size = (int) U.ARRAY_OBJECT_BASE_OFFSET; static final int heap_oop_size = wb.getHeapOopSize(); static final int card_size = 512; static final int one_card = (card_size - obj_header_size) / heap_oop_size; diff --git a/test/hotspot/jtreg/compiler/stringopts/TestFluidAndNonFluid.java b/test/hotspot/jtreg/compiler/stringopts/TestFluidAndNonFluid.java deleted file mode 100644 index 34fbaa1b6edad..0000000000000 --- a/test/hotspot/jtreg/compiler/stringopts/TestFluidAndNonFluid.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - - /* - * @test - * @bug 8341696 - * @summary Allow C2 to also optimize non-fluid string builder calls. - * @library /test/lib / - * @run driver compiler.c2.irTests.stringopts.TestFluidAndNonFluid - */ -package compiler.c2.irTests.stringopts; - -import compiler.lib.ir_framework.*; -import jdk.test.lib.Asserts; - -public class TestFluidAndNonFluid { - - public static int unknown = 1; - - public static void main(String[] args) { - // Dont inline any StringBuilder methods for this IR test to check if string opts are applied or not. - TestFramework.runWithFlags("-XX:CompileCommand=dontinline,java.lang.StringBuilder::*"); - } - - @DontInline - public static void opaque(StringBuilder builder) { - builder.append("Z"); - } - - @Run(test = {"fluid", "nonFluid", "nonFinal", "nonFluidExtraneousVariable", "nonFluidConditional", - "nonFluidOpaqueCall"}) - public void runMethod() { - Asserts.assertEQ("0ac", fluidNoParam()); - Asserts.assertEQ("ac", nonFluidNoParam()); - Asserts.assertEQ("ac", fluid("c")); - Asserts.assertEQ("ac", nonFluid("c")); - Asserts.assertEQ("ac", nonFinal("c")); - Asserts.assertEQ("ac", nonFluidExtraneousVariable("c")); - Asserts.assertEQ("ac", nonFluidConditional("c")); - Asserts.assertEQ("aZ", nonFluidOpaqueCall()); - } - - @Test - @IR(failOn = {IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString", IRNode.INTRINSIC_TRAP}) - public static String fluidNoParam() { - return new StringBuilder("0").append("a").append("c").toString(); - } - - @Test - @IR(failOn = {IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString", IRNode.INTRINSIC_TRAP}) - public static String nonFluidNoParam() { - final StringBuilder sb = new StringBuilder(); - sb.append("a"); - sb.append("c"); - return sb.toString(); - } - - @Test - @IR(failOn = {IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString"}) - public static String fluid(String a) { - return new StringBuilder().append("a").append(a).toString(); - } - - @Test - @IR(failOn = 
{IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString"}) - public static String nonFluid(String a) { - final StringBuilder sb = new StringBuilder(); - sb.append("a"); - sb.append(a); - return sb.toString(); - } - - @Test - @IR(failOn = {IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString"}) - public static String nonFinal(String a) { - StringBuilder sb = new StringBuilder(); - sb.append("a"); - sb.append(a); - return sb.toString(); - } - - @Test - @IR(failOn = {IRNode.ALLOC_OF, "StringBuilder", IRNode.CALL_OF_METHOD, "toString"}) - public static String nonFluidExtraneousVariable(String a) { - final StringBuilder sb = new StringBuilder(); - final StringBuilder x = sb; - sb.append("a"); - x.append(a); - return sb.toString(); - } - - @Test - @IR(counts = {IRNode.ALLOC_OF, "StringBuilder", "1", IRNode.CALL_OF_METHOD, "toString", "1"}) - @IR(failOn = IRNode.INTRINSIC_TRAP) - static String nonFluidConditional(String a) { - final StringBuilder sb = new StringBuilder(); - sb.append("a"); - if (unknown == 1) { - sb.append(a); - } - return sb.toString(); - } - - @Test - @IR(counts = {IRNode.ALLOC_OF, "StringBuilder", "1", IRNode.CALL_OF_METHOD, "toString", "1"}) - @IR(failOn = IRNode.INTRINSIC_TRAP) - static String nonFluidOpaqueCall() { - final StringBuilder sb = new StringBuilder(); - sb.append("a"); - opaque(sb); - return sb.toString(); - } - -} diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java index 98f638abe2d5b..2a4015daca209 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestBoolean.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestBoolean { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java index e149f6bd07bb1..8b188cb7cb52e 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestByte.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestByte { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java index 62d926b66a1df..4b5b876d50720 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestChar.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestChar { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java index 2f90f0d7c7b0a..5334da98714d0 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestDouble.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestDouble { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java index f0256e7815d5a..946f6d7d5dede 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestFloat.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestFloat { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java index 038cd2151b1b4..93ed85805d5b6 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestInt { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java index cffec14ad7259..7a6d2bfc033f0 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestLong { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java index 0c74c295e2133..2fe5d7f3ae051 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestObject { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java index 770677a98d394..47a700d9313c7 100644 --- a/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java +++ b/test/hotspot/jtreg/compiler/unsafe/JdkInternalMiscUnsafeAccessTestShort.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class JdkInternalMiscUnsafeAccessTestShort { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestBoolean.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestBoolean.java index c176277d2bad2..fcb0b3fbef379 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestBoolean.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestBoolean.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestBoolean { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestByte.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestByte.java index b205b1d33764a..7343463c41d88 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestByte.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestByte.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestByte { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestChar.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestChar.java index 114ed7e9b5a7b..4c5e02bea2665 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestChar.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestChar.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestChar { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestDouble.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestDouble.java index d813ea73e702e..5503dcd1840f9 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestDouble.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestDouble.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestDouble { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestFloat.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestFloat.java index f0482c826376a..f050c5bb17b8b 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestFloat.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestFloat.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestFloat { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestInt.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestInt.java index 9326539f4efee..85f82915e03aa 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestInt.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestInt.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestInt { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestLong.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestLong.java index 30bc9551dd50c..1a73cfe308654 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestLong.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestLong.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestLong { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestObject.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestObject.java index add15ff77fdd1..1aab41ddc9b6d 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestObject.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestObject.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestObject { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestShort.java b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestShort.java index 4ea81f026e730..123b655220c63 100644 --- a/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestShort.java +++ b/test/hotspot/jtreg/compiler/unsafe/SunMiscUnsafeAccessTestShort.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ public class SunMiscUnsafeAccessTestShort { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template b/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template index 2fc22fa360ab9..f5eadc218e449 100644 --- a/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template +++ b/test/hotspot/jtreg/compiler/unsafe/X-UnsafeAccessTest.java.template @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ public class $Qualifier$UnsafeAccessTest$Type$ { static final long STATIC_V_OFFSET; - static int ARRAY_OFFSET; + static long ARRAY_OFFSET; static int ARRAY_SHIFT; diff --git a/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java b/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java index 477632e3e7783..1b8cc143fabc5 100644 --- a/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java +++ b/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -138,7 +138,7 @@ public static class RatioVerifier { // Size of byte array that will be allocated public static final int CHUNK_SIZE = 1024; // Length of byte array, that will be added to "garbage" list. - public static final int ARRAY_LENGTH = CHUNK_SIZE - Unsafe.ARRAY_BYTE_BASE_OFFSET; + public static final int ARRAY_LENGTH = CHUNK_SIZE - (int) Unsafe.ARRAY_BYTE_BASE_OFFSET; // Amount of tries to force heap shrinking/expansion using GC public static final int GC_TRIES = 10; diff --git a/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java b/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java index adb609f547ce5..a80b16b96a4c9 100644 --- a/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java +++ b/test/hotspot/jtreg/gc/arguments/TestTargetSurvivorRatioFlag.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -254,7 +254,7 @@ public static class TargetSurvivorRatioVerifier { // Desired size of memory allocated at once public static final int CHUNK_SIZE = 1024; // Length of byte[] array that will have occupy CHUNK_SIZE bytes in heap - public static final int ARRAY_LENGTH = CHUNK_SIZE - Unsafe.ARRAY_BYTE_BASE_OFFSET; + public static final int ARRAY_LENGTH = CHUNK_SIZE - (int) Unsafe.ARRAY_BYTE_BASE_OFFSET; public static void main(String args[]) throws Exception { if (args.length != 1) { diff --git a/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java index e4f88b5d8d5c5..b2a6a22fb179e 100644 --- a/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java +++ b/test/hotspot/jtreg/runtime/FieldLayout/BaseOffsets.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,8 +110,8 @@ static class LIClass { public static final WhiteBox WB = WhiteBox.getWhiteBox(); static final long INT_OFFSET; - static final int INT_ARRAY_OFFSET; - static final int LONG_ARRAY_OFFSET; + static final long INT_ARRAY_OFFSET; + static final long LONG_ARRAY_OFFSET; static { if (!Platform.is64bit() || WB.getBooleanVMFlag("UseCompactObjectHeaders")) { INT_OFFSET = 8; @@ -151,7 +151,7 @@ static public void main(String[] args) { Asserts.assertEquals(unsafe.arrayBaseOffset(double[].class), LONG_ARRAY_OFFSET, "Misplaced double array base"); boolean narrowOops = System.getProperty("java.vm.compressedOopsMode") != null || !Platform.is64bit(); - int expected_objary_offset = narrowOops ? INT_ARRAY_OFFSET : LONG_ARRAY_OFFSET; + long expected_objary_offset = narrowOops ? 
INT_ARRAY_OFFSET : LONG_ARRAY_OFFSET; Asserts.assertEquals(unsafe.arrayBaseOffset(Object[].class), expected_objary_offset, "Misplaced object array base"); } } diff --git a/test/hotspot/jtreg/runtime/Safepoint/TestAbortOnVMOperationTimeout.java b/test/hotspot/jtreg/runtime/Safepoint/TestAbortOnVMOperationTimeout.java index a2c3f944db86b..d14c9627314e3 100644 --- a/test/hotspot/jtreg/runtime/Safepoint/TestAbortOnVMOperationTimeout.java +++ b/test/hotspot/jtreg/runtime/Safepoint/TestAbortOnVMOperationTimeout.java @@ -68,6 +68,7 @@ public static void testWith(int delay, boolean shouldPass) throws Exception { "-XX:+AbortVMOnVMOperationTimeout", "-XX:AbortVMOnVMOperationTimeoutDelay=" + delay, "-Xmx256m", + "-XX:NewSize=64m", "-XX:+UseSerialGC", "-XX:-CreateCoredumpOnCrash", "-Xlog:gc*=info", diff --git a/test/hotspot/jtreg/runtime/Unsafe/GetField.java b/test/hotspot/jtreg/runtime/Unsafe/GetField.java index 3772fa6e8db7f..88ac98135f1a7 100644 --- a/test/hotspot/jtreg/runtime/Unsafe/GetField.java +++ b/test/hotspot/jtreg/runtime/Unsafe/GetField.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,11 +37,11 @@ public class GetField { public static void main(String args[]) throws Exception { Unsafe unsafe = Unsafe.getUnsafe(); - // Unsafe.INVALID_FIELD_OFFSET is a static final int field, + // Unsafe.INVALID_FIELD_OFFSET is a static final long field, // make sure getField returns the correct field Field field = Unsafe.class.getField("INVALID_FIELD_OFFSET"); assertNotEquals(field.getModifiers() & Modifier.FINAL, 0); assertNotEquals(field.getModifiers() & Modifier.STATIC, 0); - assertEquals(field.getType(), int.class); + assertEquals(field.getType(), long.class); } } diff --git a/test/hotspot/jtreg/runtime/Unsafe/InternalErrorTest.java b/test/hotspot/jtreg/runtime/Unsafe/InternalErrorTest.java index bc125a074eeb6..edb80bc6cebea 100644 --- a/test/hotspot/jtreg/runtime/Unsafe/InternalErrorTest.java +++ b/test/hotspot/jtreg/runtime/Unsafe/InternalErrorTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -147,7 +147,7 @@ public static void test(MappedByteBuffer buffer, Unsafe unsafe, long mapAddr, lo break; case 1: // testing Unsafe.copySwapMemory, trying to access next page after truncation. 
- int destOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET; + long destOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET; unsafe.copySwapMemory(null, mapAddr + pageSize, new byte[4000], destOffset, 2000, 2); break; case 2: diff --git a/test/hotspot/jtreg/runtime/cds/appcds/AOTFlags.java b/test/hotspot/jtreg/runtime/cds/appcds/AOTFlags.java index 3a678eefc5b2b..98ff155abface 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/AOTFlags.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/AOTFlags.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,7 +59,7 @@ static void positiveTests() throws Exception { out.shouldContain("Hello World"); out.shouldHaveExitValue(0); - // (2) Assembly Phase + // (2) Assembly Phase (AOTClassLinking unspecified -> should be enabled by default) pb = ProcessTools.createLimitedTestJavaProcessBuilder( "-XX:AOTMode=create", "-XX:AOTConfiguration=" + aotConfigFile, @@ -77,6 +77,7 @@ static void positiveTests() throws Exception { "-Xlog:cds", "-cp", appJar, helloClass); out = CDSTestUtils.executeAndLog(pb, "prod"); + out.shouldContain("Using AOT-linked classes: true (static archive: has aot-linked classes)"); out.shouldContain("Opened archive hello.aot."); out.shouldContain("Hello World"); out.shouldHaveExitValue(0); @@ -107,7 +108,7 @@ static void positiveTests() throws Exception { out.shouldContain("Hello World"); out.shouldHaveExitValue(0); - // (5) AOTMode=on + // (6) AOTMode=on pb = ProcessTools.createLimitedTestJavaProcessBuilder( "-XX:AOTCache=" + aotCacheFile, "--show-version", @@ -119,6 +120,30 @@ static void positiveTests() throws Exception { out.shouldContain("Opened archive hello.aot."); out.shouldContain("Hello World"); out.shouldHaveExitValue(0); + + // (7) Assembly Phase with 
-XX:-AOTClassLinking + pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-XX:AOTMode=create", + "-XX:-AOTClassLinking", + "-XX:AOTConfiguration=" + aotConfigFile, + "-XX:AOTCache=" + aotCacheFile, + "-Xlog:cds", + "-cp", appJar); + out = CDSTestUtils.executeAndLog(pb, "asm"); + out.shouldContain("Dumping shared data to file:"); + out.shouldMatch("cds.*hello[.]aot"); + out.shouldHaveExitValue(0); + + // (8) Production Run with AOTCache, which was created with -XX:-AOTClassLinking + pb = ProcessTools.createLimitedTestJavaProcessBuilder( + "-XX:AOTCache=" + aotCacheFile, + "-Xlog:cds", + "-cp", appJar, helloClass); + out = CDSTestUtils.executeAndLog(pb, "prod"); + out.shouldContain("Using AOT-linked classes: false (static archive: no aot-linked classes)"); + out.shouldContain("Opened archive hello.aot."); + out.shouldContain("Hello World"); + out.shouldHaveExitValue(0); } static void negativeTests() throws Exception { diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassA.jasm b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassA.jasm new file mode 100644 index 0000000000000..724d7c03584ad --- /dev/null +++ b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassA.jasm @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +super public class BadOldClassA + version 49:0 +{ + + +public Method "":"()V" + stack 1 locals 1 +{ + aload_0; + invokespecial Method java/lang/Object."":"()V"; + return; +} + + /* + * The following method tries to return an Object as a String. + * Verifier should fail. + */ +public Method doit:"()Ljava/lang/String;" + stack 2 locals 1 +{ + new class java/lang/Object; + dup; + invokespecial Method java/lang/Object."":"()V"; + astore_0; + aload_0; + areturn; // tries to return an Object as a String +} + +} // end Class BadOldClassA diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassB.jasm b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassB.jasm new file mode 100644 index 0000000000000..ce2f101a588eb --- /dev/null +++ b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BadOldClassB.jasm @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +super public class BadOldClassB + version 49:0 +{ + + +public Method "":"()V" + stack 1 locals 1 +{ + aload_0; + invokespecial Method java/lang/Object."":"()V"; + return; +} + + /* + * The following method tries to return an Object as a String. + * Verifier should fail. + */ +public Method doit:"()Ljava/lang/String;" + stack 2 locals 1 +{ + new class java/lang/Object; + dup; + invokespecial Method java/lang/Object."":"()V"; + astore_0; + aload_0; + areturn; // tries to return an Object as a String +} + +} // end Class BadOldClassB diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BulkLoaderTest.java b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BulkLoaderTest.java index 25361403481ee..c4648bd2eb37b 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BulkLoaderTest.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/aotClassLinking/BulkLoaderTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 * * This code is free software; you can redistribute it and/or modify it @@ -32,9 +32,10 @@ * @comment work around JDK-8345635 * @requires !vm.jvmci.enabled * @library /test/jdk/lib/testlibrary /test/lib - * @build InitiatingLoaderTester + * @build InitiatingLoaderTester BadOldClassA BadOldClassB * @build BulkLoaderTest * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar BulkLoaderTestApp.jar BulkLoaderTestApp MyUtil InitiatingLoaderTester + * BadOldClassA BadOldClassB * @run driver BulkLoaderTest STATIC */ @@ -44,9 +45,10 @@ * @comment work around JDK-8345635 * @requires !vm.jvmci.enabled * @library /test/jdk/lib/testlibrary /test/lib - * @build InitiatingLoaderTester + * @build InitiatingLoaderTester BadOldClassA BadOldClassB * @build jdk.test.whitebox.WhiteBox BulkLoaderTest * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar BulkLoaderTestApp.jar BulkLoaderTestApp MyUtil InitiatingLoaderTester + * BadOldClassA BadOldClassB * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. BulkLoaderTest DYNAMIC */ @@ -129,6 +131,7 @@ class BulkLoaderTestApp { public static void main(String args[]) throws Exception { checkClasses(); checkInitiatingLoader(); + checkOldClasses(); } // Check the ClassLoader/Module/Package/ProtectionDomain/CodeSource of classes that are aot-linked @@ -229,6 +232,29 @@ static void checkInitiatingLoader() throws Exception { throw new RuntimeException("Should not have succeeded"); } + + static void checkOldClasses() throws Exception { + // Resolve BadOldClassA from the constant pool without linking it. + // implNote: BadOldClassA will be excluded, so any resolved references + // to BadOldClassA should be removed from the archived constant pool. + Class c = BadOldClassA.class; + Object n = new Object(); + if (c.isInstance(n)) { // Note that type-testing BadOldClassA here neither links nor initializes it. 
+ throw new RuntimeException("Must not succeed"); + } + + try { + // In dynamic dump, the VM loads BadOldClassB and then attempts to + // link it. This will leave BadOldClassB in a "failed verification" state. + // All references to BadOldClassB from the CP should be purged from the CDS + // archive. + c = BadOldClassB.class; + c.newInstance(); + throw new RuntimeException("Must not succeed"); + } catch (VerifyError e) { + System.out.println("Caught VerifyError for BadOldClassB: " + e); + } + } } class MyUtil { diff --git a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001.java b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001.java index 757102f0c1c49..8ac20f89eaa07 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001.java +++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -80,6 +80,10 @@ public double meth01() { float f = 0f; double d = 0; checkPoint(); + if (currThread.isVirtual()) { + out.println("meth01: skipping results check for virtual thread"); + return d + f + 1; // SetLocal* should return OPAQUE_FRAME for a virtual thread + } if (l != 22L || f != floatVal || d != doubleVal) { out.println("meth01: l =" + l + " f = " + f + " d = " + d); result = 2; @@ -97,6 +101,10 @@ public void meth02(int step) { meth02(step - 1); } else { checkPoint(); + if (currThread.isVirtual()) { + out.println("meth02: skipping results check for virtual thread"); + return; // SetLocal* should return OPAQUE_FRAME for a virtual thread + } if (i1 != 1 || i2 != 1 || i3 != 1 || i4 != 1 || !i5) { out.println("meth02: i1 =" + i1 + " i2 = " + i2 + " i3 = " + i3 + " i4 = " + i4 + " i5 = " + i5); @@ -109,6 +117,10 @@ public static void meth03() { setlocal001 ob1 = null; int[] ob2 = null; checkPoint(); + if (currThread.isVirtual()) { + out.println("meth03: skipping results check for virtual thread"); + return; // SetLocalObject for obj1 and obj2 should return OPAQUE_FRAME for a virtual thread + } if (ob1.val != 3 || ob2[2] != 8) { out.println("meth03: ob1.val =" + ob1.val + " ob2[2] = " + ob2[2]); result = 2; @@ -118,6 +130,10 @@ public static void meth03() { public static void meth04(int i1, long l, short i2, double d, char i3, float f, byte i4, boolean b) { checkPoint(); + if (currThread.isVirtual()) { + out.println("meth04: skipping results check for virtual thread"); + return; // SetLocal* should return OPAQUE_FRAME for a virtual thread + } if (i1 != 1 || i2 != 2 || i3 != 3 || i4 != 4 || l != 22L || f != floatVal || d != doubleVal || !b) { out.println("meth04: i1 =" + i1 + " i2 = " + i2 + diff --git a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001/setlocal001.cpp b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001/setlocal001.cpp index 
8193058eba76d..9ad953e56f962 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001/setlocal001.cpp +++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal001/setlocal001.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,13 @@ static jdouble doubleVal; static jobject objVal; static jobject arrVal; +static void check_error(jvmtiError err, bool is_virtual, const char* func_id) { + if (err != JVMTI_ERROR_NONE && !(is_virtual && err == JVMTI_ERROR_OPAQUE_FRAME)) { + printf("(%s) unexpected error: %s (%d)\n", func_id, TranslateError(err), err); + result = STATUS_FAILED; + } +} + void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jthread thr, jmethodID method, jlocation location) { jvmtiError err; @@ -51,6 +58,7 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jlocation loc; jint entryCount; jvmtiLocalVariableEntry *table = nullptr; + bool is_virtual = env->IsVirtualThread(thr); int i; err = jvmti_env->GetFrameLocation(thr, 1, &mid, &loc); @@ -74,27 +82,15 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, if (strcmp(table[i].name, "l") == 0) { err = jvmti_env->SetLocalLong(thr, 1, table[i].slot, longVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalLong) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalLong"); } else if (strcmp(table[i].name, "f") == 0) { err = jvmti_env->SetLocalFloat(thr, 1, table[i].slot, floatVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalFloat) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalFloat"); } else if 
(strcmp(table[i].name, "d") == 0) { err = jvmti_env->SetLocalDouble(thr, 1, table[i].slot, doubleVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalDouble) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalDouble"); } } } else if (mid == mid2) { @@ -102,43 +98,23 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, if (strcmp(table[i].name, "i1") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i1) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i1"); } else if (strcmp(table[i].name, "i2") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i2) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i2"); } else if (strcmp(table[i].name, "i3") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i3) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i3"); } else if (strcmp(table[i].name, "i4") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i4) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i4"); } else if (strcmp(table[i].name, "i5") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i5) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i5"); } } } else if (mid == mid3) { @@ -146,19 +122,11 @@ void JNICALL Breakpoint(jvmtiEnv 
*jvmti_env, JNIEnv *env, if (strcmp(table[i].name, "ob1") == 0) { err = jvmti_env->SetLocalObject(thr, 1, table[i].slot, objVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalObject#ob1) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalObject#ob1"); } else if (strcmp(table[i].name, "ob2") == 0) { err = jvmti_env->SetLocalObject(thr, 1, table[i].slot, arrVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalObject#ob2) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalObject#ob2"); } } } else if (mid == mid4) { @@ -166,67 +134,35 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, if (strcmp(table[i].name, "i1") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 1); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i1,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i1,param"); } else if (strcmp(table[i].name, "i2") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 2); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i2,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i2,param"); } else if (strcmp(table[i].name, "i3") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 3); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i3,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#i3,param"); } else if (strcmp(table[i].name, "i4") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, 4); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#i4,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, 
"SetLocalInt#i4,param"); } else if (strcmp(table[i].name, "b") == 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, JNI_TRUE); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalInt#b,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalInt#b,param"); } else if (strcmp(table[i].name, "l") == 0) { err = jvmti_env->SetLocalLong(thr, 1, table[i].slot, longVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalLong,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalLong,param"); } else if (strcmp(table[i].name, "f") == 0) { err = jvmti_env->SetLocalFloat(thr, 1, table[i].slot, floatVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalFloat,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalFloat,param"); } else if (strcmp(table[i].name, "d") == 0) { err = jvmti_env->SetLocalDouble(thr, 1, table[i].slot, doubleVal); - if (err != JVMTI_ERROR_NONE) { - printf("(SetLocalDouble,param) unexpected error: %s (%d)\n", - TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, "SetLocalDouble,param"); } } } else { diff --git a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal003/setlocal003.cpp b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal003/setlocal003.cpp index 793f22ec40fc7..c366655a7350a 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal003/setlocal003.cpp +++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal003/setlocal003.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,15 @@ static jvmtiEventCallbacks callbacks; static jint result = PASSED; static jboolean printdump = JNI_FALSE; +static void check_error(jvmtiError err, bool is_virtual, char* var_name) { + if (err != JVMTI_ERROR_INVALID_SLOT && !(is_virtual && err == JVMTI_ERROR_OPAQUE_FRAME)) { + printf("(%s) ", var_name); + printf("Error expected: JVMTI_ERROR_INVALID_SLOT or JVMTI_ERROR_OPAQUE_FRAME,\n"); + printf("\t actual: %s (%d)\n", TranslateError(err), err); + result = STATUS_FAILED; + } +} + void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jthread thr, jmethodID method, jlocation location) { jvmtiError err; @@ -47,6 +56,7 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jlocation loc; jint entryCount; jvmtiLocalVariableEntry *table; + bool is_virtual = env->IsVirtualThread(thr); int i; err = jvmti_env->GetFrameLocation(thr, 1, &mid, &loc); @@ -71,51 +81,28 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, printf(">>> checking on invalid slot ...\n"); } for (i = 0; i < entryCount; i++) { + char* var_name = table[i].name; + if (strcmp(table[i].name, "o") == 0) { err = jvmti_env->SetLocalObject(thr, 1, INV_SLOT, (jobject)thr); - if (err != JVMTI_ERROR_INVALID_SLOT) { - printf("(%s) ", table[i].name); - printf("Error expected: JVMTI_ERROR_INVALID_SLOT,\n"); - printf("\t actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } else if (strcmp(table[i].name, "i") == 0) { err = jvmti_env->SetLocalInt(thr, 1, INV_SLOT, (jint)0); - if (err != JVMTI_ERROR_INVALID_SLOT) { - printf("(%s) ", table[i].name); - printf("Error expected: JVMTI_ERROR_INVALID_SLOT,\n"); - printf("\t actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } else if (strcmp(table[i].name, "l") == 0) { err = jvmti_env->SetLocalLong(thr, 1, INV_SLOT, (jlong)0); - if (err 
!= JVMTI_ERROR_INVALID_SLOT) { - printf("(%s) ", table[i].name); - printf("Error expected: JVMTI_ERROR_INVALID_SLOT,\n"); - printf("\t actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } else if (strcmp(table[i].name, "f") == 0) { err = jvmti_env->SetLocalFloat(thr, 1, INV_SLOT, (jfloat)0); - if (err != JVMTI_ERROR_INVALID_SLOT) { - printf("(%s) ", table[i].name); - printf("Error expected: JVMTI_ERROR_INVALID_SLOT,\n"); - printf("\t actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } else if (strcmp(table[i].name, "d") == 0) { err = jvmti_env->SetLocalDouble(thr, 1, INV_SLOT, (jdouble)0); - if (err != JVMTI_ERROR_INVALID_SLOT) { - printf("(%s) ", table[i].name); - printf("Error expected: JVMTI_ERROR_INVALID_SLOT,\n"); - printf("\t actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } } diff --git a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal004/setlocal004.cpp b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal004/setlocal004.cpp index 37598107c455c..db10b761ca457 100644 --- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal004/setlocal004.cpp +++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/SetLocalVariable/setlocal004/setlocal004.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,15 @@ static jvmtiEventCallbacks callbacks; static jint result = PASSED; static jboolean printdump = JNI_FALSE; +static void check_error(jvmtiError err, bool is_virtual, char* var_name) { + if (err != JVMTI_ERROR_TYPE_MISMATCH && !(is_virtual && err == JVMTI_ERROR_OPAQUE_FRAME)) { + printf("(%s) ", var_name); + printf("Error: expected: JVMTI_ERROR_TYPE_MISMATCH or JVMTI_ERROR_OPAQUE_FRAME,\n"); + printf("\t actual: %s (%d)\n", TranslateError(err), err); + result = STATUS_FAILED; + } +} + void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jthread thr, jmethodID method, jlocation location) { jvmtiError err; @@ -47,6 +56,7 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, jlocation loc; jint entryCount; jvmtiLocalVariableEntry *table; + bool is_virtual = env->IsVirtualThread(thr); int i; err = jvmti_env->GetFrameLocation(thr, 1, &mid, &loc); @@ -72,56 +82,33 @@ void JNICALL Breakpoint(jvmtiEnv *jvmti_env, JNIEnv *env, printf(">>> checking on type mismatch ...\n"); } for (i = 0; i < entryCount; i++) { + char* var_name = table[i].name; + if (strlen(table[i].name) != 1) continue; if (strcmp(table[i].name, "o") != 0) { err = jvmti_env->SetLocalObject(thr, 1, table[i].slot, (jobject)thr); - if (err != JVMTI_ERROR_TYPE_MISMATCH) { - printf("\"%s\" against SetLocalObject:\n", table[i].name); - printf(" expected: JVMTI_ERROR_TYPE_MISMATCH,"); - printf(" actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } if (strcmp(table[i].name, "i") != 0) { err = jvmti_env->SetLocalInt(thr, 1, table[i].slot, (jint)0); - if (err != JVMTI_ERROR_TYPE_MISMATCH) { - printf("\"%s\" against SetLocalInt:\n", table[i].name); - printf(" expected: JVMTI_ERROR_TYPE_MISMATCH,"); - printf(" actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } if (strcmp(table[i].name, 
"l") != 0) { err = jvmti_env->SetLocalLong(thr, 1, table[i].slot, (jlong)0); - if (err != JVMTI_ERROR_TYPE_MISMATCH) { - printf("\"%s\" against SetLocalLong:\n", table[i].name); - printf(" expected: JVMTI_ERROR_TYPE_MISMATCH,"); - printf(" actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } if (strcmp(table[i].name, "f") != 0) { err = jvmti_env->SetLocalFloat(thr, 1, table[i].slot, (jfloat)0); - if (err != JVMTI_ERROR_TYPE_MISMATCH) { - printf("\"%s\" against SetLocalFloat:\n", table[i].name); - printf(" expected: JVMTI_ERROR_TYPE_MISMATCH,"); - printf(" actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } if (strcmp(table[i].name, "d") != 0) { err = jvmti_env->SetLocalDouble(thr, 1, table[i].slot, (jdouble)0); - if (err != JVMTI_ERROR_TYPE_MISMATCH) { - printf("\"%s\" against SetLocalDouble:\n", table[i].name); - printf(" expected: JVMTI_ERROR_TYPE_MISMATCH,"); - printf(" actual: %s (%d)\n", TranslateError(err), err); - result = STATUS_FAILED; - } + check_error(err, is_virtual, var_name); } } diff --git a/test/jdk/java/awt/Headless/HeadlessMalfunctionAgent.java b/test/jdk/java/awt/Headless/HeadlessMalfunctionAgent.java new file mode 100644 index 0000000000000..dc422581fa993 --- /dev/null +++ b/test/jdk/java/awt/Headless/HeadlessMalfunctionAgent.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +import jdk.internal.org.objectweb.asm.ClassReader; +import jdk.internal.org.objectweb.asm.ClassVisitor; +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import jdk.internal.org.objectweb.asm.Opcodes; + +import java.lang.instrument.ClassFileTransformer; +import java.lang.instrument.Instrumentation; +import java.security.ProtectionDomain; + +/** + * This agent removes the isHeadless method from java.awt.GraphicsEnvironment. + */ +public class HeadlessMalfunctionAgent { + + public static void premain(String agentArgs, Instrumentation inst) { + inst.addTransformer(new ClassFileTransformer() { + + @Override + public byte[] transform(ClassLoader loader, String className, Class classBeingRedefined, + ProtectionDomain pd, byte[] cb) { + if ("java/awt/GraphicsEnvironment".equals(className)) { + System.out.println("Transforming java.awt.GraphicsEnvironment."); + try { + final ClassReader cr = new ClassReader(cb); + final ClassWriter cw = new ClassWriter(cr, 0); + cr.accept(new ClassVisitor(Opcodes.ASM9, cw) { + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, + String[] exceptions) { + if ("isHeadless".equals(name) && "()Z".equals(descriptor)) { + System.out.println("isHeadless removed from java.awt.GraphicsEnvironment."); + // WHACK! Remove the isHeadless method. 
+ return null; + } + return super.visitMethod(access, name, descriptor, signature, exceptions); + } + }, 0); + return cw.toByteArray(); + } catch (Exception e) { + e.printStackTrace(); + } + } + return null; + } + }); + } +} diff --git a/test/jdk/java/awt/Headless/HeadlessMalfunctionTest.java b/test/jdk/java/awt/Headless/HeadlessMalfunctionTest.java new file mode 100644 index 0000000000000..1d1a9c0eec01e --- /dev/null +++ b/test/jdk/java/awt/Headless/HeadlessMalfunctionTest.java @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import jdk.test.lib.JDKToolFinder; +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; + +import java.nio.file.Files; +import java.nio.file.Path; + +/* + * @test + * @bug 8336382 + * @summary Test that in absence of isHeadless method, the JDK throws a meaningful error message. 
+ * @library /test/lib + * @modules java.base/jdk.internal.org.objectweb.asm + * @build HeadlessMalfunctionAgent + * @run driver jdk.test.lib.helpers.ClassFileInstaller + * HeadlessMalfunctionAgent + * HeadlessMalfunctionAgent$1 + * HeadlessMalfunctionAgent$1$1 + * @run driver HeadlessMalfunctionTest + */ +public class HeadlessMalfunctionTest { + + public static void main(String[] args) throws Exception { + // Package agent + Files.writeString(Path.of("MANIFEST.MF"), "Premain-Class: HeadlessMalfunctionAgent\n"); + final ProcessBuilder pbJar = new ProcessBuilder() + .command(JDKToolFinder.getJDKTool("jar"), "cmf", "MANIFEST.MF", "agent.jar", + "HeadlessMalfunctionAgent.class", + "HeadlessMalfunctionAgent$1.class", + "HeadlessMalfunctionAgent$1$1.class"); + ProcessTools.executeProcess(pbJar).shouldHaveExitValue(0); + + // Run test + final ProcessBuilder pbJava = ProcessTools.createTestJavaProcessBuilder( + "--add-opens", + "java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED", + "-javaagent:agent.jar", + "HeadlessMalfunctionTest$Runner" + ); + final OutputAnalyzer output = ProcessTools.executeProcess(pbJava); + // Unpatched JDK logs: "FATAL ERROR in native method: Could not allocate library name" + output.shouldContain("FATAL ERROR in native method: GetStaticMethodID isHeadless failed"); + output.shouldNotHaveExitValue(0); + } + + public static class Runner { + public static void main(String[] args) { + System.out.println(java.awt.GraphicsEnvironment + .getLocalGraphicsEnvironment() + .getMaximumWindowBounds()); + } + } +} diff --git a/test/jdk/java/time/test/java/time/TestZoneOffset.java b/test/jdk/java/time/test/java/time/TestZoneOffset.java index a69eedfcd6c25..b85a629aefce7 100644 --- a/test/jdk/java/time/test/java/time/TestZoneOffset.java +++ b/test/jdk/java/time/test/java/time/TestZoneOffset.java @@ -1,5 +1,6 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2025, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2025, Alibaba Group Holding Limited. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,4 +83,19 @@ public void test_factory_ofTotalSecondsSame() { assertSame(ZoneOffset.ofTotalSeconds(0), ZoneOffset.UTC); } + @Test + public void test_quarter_cache() throws Exception { + // [-18:00, +18:00] + int quarter = 15 * 60; + int start = -18 * 3600, + end = 18 * 3600; + for (int totalSeconds = start; totalSeconds <= end; totalSeconds += quarter) { + var offset0 = ZoneOffset.ofTotalSeconds(totalSeconds); + var offset1 = ZoneOffset.ofTotalSeconds(totalSeconds); + var offset2 = ZoneOffset.ofTotalSeconds(totalSeconds); + assertSame(offset0, offset1); + assertSame(offset1, offset2); + } + } + } diff --git a/test/jdk/javax/swing/JPopupMenu/FocusablePopupDismissTest.java b/test/jdk/javax/swing/JPopupMenu/FocusablePopupDismissTest.java index 2704c9789e309..cb3811265dc12 100644 --- a/test/jdk/javax/swing/JPopupMenu/FocusablePopupDismissTest.java +++ b/test/jdk/javax/swing/JPopupMenu/FocusablePopupDismissTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,22 +24,28 @@ /* * @test * @key headful - * @bug 8319103 + * @bug 8319103 8342096 * @requires (os.family == "linux") - * @library /java/awt/regtesthelpers - * @build PassFailJFrame + * @library /java/awt/regtesthelpers /test/lib + * @build PassFailJFrame jtreg.SkippedException * @summary Tests if the focusable popup can be dismissed when the parent * window or the popup itself loses focus in Wayland. 
* @run main/manual FocusablePopupDismissTest */ +import javax.swing.BorderFactory; import javax.swing.JButton; import javax.swing.JFrame; +import javax.swing.JMenu; +import javax.swing.JMenuItem; +import javax.swing.JPanel; import javax.swing.JPopupMenu; import javax.swing.JTextField; import java.awt.Window; import java.util.List; +import jtreg.SkippedException; + public class FocusablePopupDismissTest { private static final String INSTRUCTIONS = """ A frame with a "Click me" button should appear next to the window @@ -47,44 +53,72 @@ public class FocusablePopupDismissTest { Click on the "Click me" button. - If the JTextField popup with "Some text" is not showing on the screen, - click Fail. + A menu should appear next to the window. If you move the cursor over + the first menu, the JTextField popup should appear on the screen. + If it doesn't, click Fail. The following steps require some focusable system window to be displayed on the screen. This could be a system settings window, file manager, etc. Click on the "Click me" button if the popup is not displayed - on the screen. + on the screen, move the mouse pointer over the menu. While the popup is displayed, click on some other window on the desktop. - If the popup has disappeared, click Pass, otherwise click Fail. + If the popup does not disappear, click Fail. + + Open the menu again, move the mouse cursor over the following: + "Focusable 1" -> "Focusable 2" -> "Editor Focusable 2" + Move the mouse to the focusable system window + (keeping the "Editor Focusable 2" JTextField open) and click on it. + + If the popup does not disappear, click Fail, otherwise click Pass. """; public static void main(String[] args) throws Exception { if (System.getenv("WAYLAND_DISPLAY") == null) { - //test is valid only when running on Wayland. 
- return; + throw new SkippedException("XWayland only test"); } PassFailJFrame.builder() .title("FocusablePopupDismissTest") .instructions(INSTRUCTIONS) - .rows(20) .columns(45) .testUI(FocusablePopupDismissTest::createTestUI) .build() .awaitAndCheck(); } + static JMenu getMenuWithMenuItem(boolean isSubmenuItemFocusable, String text) { + JMenu menu = new JMenu(text); + menu.add(isSubmenuItemFocusable + ? new JTextField("Editor " + text, 11) + : new JMenuItem("Menu item" + text) + ); + return menu; + } + static List createTestUI() { JFrame frame = new JFrame("FocusablePopupDismissTest"); JButton button = new JButton("Click me"); - frame.add(button); + + JPanel wrapper = new JPanel(); + wrapper.setBorder(BorderFactory.createEmptyBorder(16, 16, 16, 16)); + wrapper.add(button); + + frame.add(wrapper); button.addActionListener(e -> { JPopupMenu popupMenu = new JPopupMenu(); - JTextField textField = new JTextField("Some text", 10); - popupMenu.add(textField); + + JMenu menu1 = new JMenu("Menu 1"); + menu1.add(new JTextField("Some text", 10)); + JMenu menu2 = new JMenu("Menu 2"); + menu2.add(new JTextField("Some text", 10)); + + popupMenu.add(getMenuWithMenuItem(true, "Focusable 1")); + popupMenu.add(getMenuWithMenuItem(true, "Focusable 2")); + popupMenu.add(getMenuWithMenuItem(false, "Non-Focusable 1")); + popupMenu.add(getMenuWithMenuItem(false, "Non-Focusable 2")); popupMenu.show(button, 0, button.getHeight()); }); frame.pack(); diff --git a/test/jdk/javax/swing/JPopupMenu/NestedFocusablePopupTest.java b/test/jdk/javax/swing/JPopupMenu/NestedFocusablePopupTest.java new file mode 100644 index 0000000000000..55963b081a558 --- /dev/null +++ b/test/jdk/javax/swing/JPopupMenu/NestedFocusablePopupTest.java @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @summary tests if nested menu is displayed on Wayland + * @requires (os.family == "linux") + * @key headful + * @bug 8342096 + * @library /test/lib + * @build jtreg.SkippedException + * @run main NestedFocusablePopupTest + */ + +import javax.swing.JButton; +import javax.swing.JFrame; +import javax.swing.JMenu; +import javax.swing.JMenuItem; +import javax.swing.JPanel; +import javax.swing.JPopupMenu; +import javax.swing.SwingUtilities; +import java.awt.Component; +import java.awt.Dimension; +import java.awt.IllegalComponentStateException; +import java.awt.Rectangle; +import java.awt.Robot; +import java.awt.event.InputEvent; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import jtreg.SkippedException; + +public class NestedFocusablePopupTest { + + static volatile JMenu menuWithFocusableItem; + static volatile JMenu menuWithNonFocusableItem; + static volatile JPopupMenu popupMenu; + static volatile JFrame 
frame; + static volatile Robot robot; + + public static void main(String[] args) throws Exception { + if (System.getenv("WAYLAND_DISPLAY") == null) { + throw new SkippedException("XWayland only test"); + } + + robot = new Robot(); + robot.setAutoDelay(50); + + try { + SwingUtilities.invokeAndWait(NestedFocusablePopupTest::initAndShowGui); + test0(); + test1(); + } finally { + SwingUtilities.invokeAndWait(frame::dispose); + } + } + + public static void waitTillShown(final Component component, long msTimeout) + throws InterruptedException, TimeoutException { + long startTime = System.currentTimeMillis(); + + while (true) { + try { + Thread.sleep(50); + component.getLocationOnScreen(); + break; + } catch (IllegalComponentStateException e) { + if (System.currentTimeMillis() - startTime > msTimeout) { + throw new TimeoutException("Component not shown within the specified timeout"); + } + } + } + } + + static Rectangle waitAndGetOnScreenBoundsOnEDT(Component component) + throws InterruptedException, TimeoutException, ExecutionException { + waitTillShown(component, 500); + robot.waitForIdle(); + + FutureTask task = new FutureTask<>(() + -> new Rectangle(component.getLocationOnScreen(), component.getSize())); + SwingUtilities.invokeLater(task); + return task.get(500, TimeUnit.MILLISECONDS); + } + + static void test0() throws Exception { + Rectangle frameBounds = waitAndGetOnScreenBoundsOnEDT(frame); + robot.mouseMove(frameBounds.x + frameBounds.width / 2, + frameBounds.y + frameBounds.height / 2); + + robot.mousePress(InputEvent.BUTTON3_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON3_DOWN_MASK); + + Rectangle menuBounds = waitAndGetOnScreenBoundsOnEDT(menuWithFocusableItem); + robot.mouseMove(menuBounds.x + 5, menuBounds.y + 5); + + // Give popup some time to disappear (in case of failure) + robot.waitForIdle(); + robot.delay(200); + + try { + waitTillShown(popupMenu, 500); + } catch (TimeoutException e) { + throw new RuntimeException("The popupMenu disappeared when it 
shouldn't have."); + } + } + + static void test1() throws Exception { + Rectangle frameBounds = waitAndGetOnScreenBoundsOnEDT(frame); + robot.mouseMove(frameBounds.x + frameBounds.width / 2, + frameBounds.y + frameBounds.height / 2); + + robot.mousePress(InputEvent.BUTTON3_DOWN_MASK); + robot.mouseRelease(InputEvent.BUTTON3_DOWN_MASK); + + Rectangle menuBounds = waitAndGetOnScreenBoundsOnEDT(menuWithFocusableItem); + robot.mouseMove(menuBounds.x + 5, menuBounds.y + 5); + robot.waitForIdle(); + robot.delay(200); + + menuBounds = waitAndGetOnScreenBoundsOnEDT(menuWithNonFocusableItem); + robot.mouseMove(menuBounds.x + 5, menuBounds.y + 5); + + // Give popup some time to disappear (in case of failure) + robot.waitForIdle(); + robot.delay(200); + + try { + waitTillShown(popupMenu, 500); + } catch (TimeoutException e) { + throw new RuntimeException("The popupMenu disappeared when it shouldn't have."); + } + } + + static JMenu getMenuWithMenuItem(boolean isSubmenuItemFocusable, String text) { + JMenu menu = new JMenu(text); + menu.add(isSubmenuItemFocusable + ? 
new JButton(text) + : new JMenuItem(text) + ); + return menu; + } + + private static void initAndShowGui() { + frame = new JFrame("NestedFocusablePopupTest"); + JPanel panel = new JPanel(); + panel.setPreferredSize(new Dimension(200, 180)); + + + popupMenu = new JPopupMenu(); + menuWithFocusableItem = + getMenuWithMenuItem(true, "focusable subitem"); + menuWithNonFocusableItem = + getMenuWithMenuItem(false, "non-focusable subitem"); + + popupMenu.add(menuWithFocusableItem); + popupMenu.add(menuWithNonFocusableItem); + + panel.setComponentPopupMenu(popupMenu); + frame.add(panel); + frame.pack(); + frame.setLocationRelativeTo(null); + frame.setVisible(true); + } +} diff --git a/test/jdk/javax/swing/JProgressBar/TestProgressBarUI.java b/test/jdk/javax/swing/JProgressBar/TestProgressBarUI.java new file mode 100644 index 0000000000000..0091fe625c9bd --- /dev/null +++ b/test/jdk/javax/swing/JProgressBar/TestProgressBarUI.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8318577 + * @summary Tests JProgressBarUI renders correctly in Windows L&F + * @requires (os.family == "windows") + * @library /java/awt/regtesthelpers + * @build PassFailJFrame + * @run main/manual TestProgressBarUI + */ + +import java.awt.BorderLayout; +import java.awt.Color; +import java.awt.Dimension; +import java.awt.FlowLayout; + +import javax.swing.JComponent; +import javax.swing.JFrame; +import javax.swing.JPanel; +import javax.swing.JProgressBar; +import javax.swing.SwingUtilities; +import javax.swing.UIManager; + +public class TestProgressBarUI { + + private static final String instructionsText = """ + Two progressbar "Good" and "Bad" + will be shown with different preferred size, + If the "Bad" progressbar is rendered at the same + height as "Good" progressbar, + without any difference in padding internally + the test passes, otherwise fails. 
"""; + + public static void main(String[] args) throws Exception { + System.setProperty("sun.java2d.uiScale", "2.0"); + UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); + PassFailJFrame.builder() + .title("ProgressBar Instructions") + .instructions(instructionsText) + .rows(9) + .columns(36) + .testUI(TestProgressBarUI::doTest) + .build() + .awaitAndCheck(); + } + + public static JFrame doTest() { + JFrame frame = new JFrame("JProgressBar"); + + JPanel panel = new JPanel(new FlowLayout(20, 20, FlowLayout.LEADING)); + panel.setBackground(Color.white); + + JProgressBar p1 = new JProgressBar(0, 100); + p1.setValue(50); + p1.setStringPainted(true); + p1.setString("GOOD"); + p1.setPreferredSize(new Dimension(100, 21)); + panel.add(p1); + + JProgressBar p2 = new JProgressBar(0, 100); + p2.setValue(50); + p2.setStringPainted(true); + p2.setString("BAD"); + + p2.setPreferredSize(new Dimension(100, 22)); + panel.add(p2); + + JComponent c = (JComponent) frame.getContentPane(); + c.add(panel, BorderLayout.CENTER); + + frame.pack(); + frame.setLocationByPlatform(true); + return frame; + } +} diff --git a/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java b/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java index 7ca36d40a6f90..9e9a6c79c7b60 100644 --- a/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java +++ b/test/jdk/sun/tools/jhsdb/JShellHeapDumpTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,8 @@ public class JShellHeapDumpTest { static Process jShellProcess; static boolean doSleep = true; // By default do a short sleep when app starts up - public static void launch(String expectedMessage, List toolArgs) + // Returns false if the attempt should be retried. 
+ public static boolean launch(String expectedMessage, List toolArgs, boolean allowRetry) throws IOException { try { @@ -81,6 +82,10 @@ public static void launch(String expectedMessage, List toolArgs) System.out.println("###### End of all output which took " + elapsedTime + "ms"); output.shouldHaveExitValue(0); } catch (Exception ex) { + if (allowRetry) { + System.out.println("Exception " + ex + " in 'launch' occured. Allow one retry."); + return false; + } throw new RuntimeException("Test ERROR " + ex, ex); } finally { if (jShellProcess.isAlive()) { @@ -91,12 +96,18 @@ public static void launch(String expectedMessage, List toolArgs) System.out.println("Jshell not alive"); } } + return true; } public static void launch(String expectedMessage, String... toolArgs) throws IOException { - launch(expectedMessage, Arrays.asList(toolArgs)); + boolean res = launch(expectedMessage, Arrays.asList(toolArgs), true); + // Allow a retry for !doSleep, because the sleep allows the debuggee to stabilize, + // making it very unlikely that jmap will fail. + if (!res && !doSleep) { + launch(expectedMessage, Arrays.asList(toolArgs), false); + } } /* Returns false if the attempt should be retried. */ diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/AllocTest.java b/test/micro/org/openjdk/bench/java/lang/foreign/AllocTest.java index a70861a0dda7e..4ae78d6d99e91 100644 --- a/test/micro/org/openjdk/bench/java/lang/foreign/AllocTest.java +++ b/test/micro/org/openjdk/bench/java/lang/foreign/AllocTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -64,9 +64,9 @@ public void tearDown() { } @Benchmark - public MemorySegment alloc_confined() { + public long alloc_confined() { try (Arena arena = Arena.ofConfined()) { - return arena.allocate(size); + return arena.allocate(size).address(); } } diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/BulkOps.java b/test/micro/org/openjdk/bench/java/lang/foreign/BulkOps.java index 6a1dd05b615e5..60f36d9f1572b 100644 --- a/test/micro/org/openjdk/bench/java/lang/foreign/BulkOps.java +++ b/test/micro/org/openjdk/bench/java/lang/foreign/BulkOps.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,7 +69,7 @@ public class BulkOps { final int[] ints = new int[ELEM_SIZE]; final MemorySegment bytesSegment = MemorySegment.ofArray(ints); - final int UNSAFE_INT_OFFSET = unsafe.arrayBaseOffset(int[].class); + final long UNSAFE_INT_OFFSET = unsafe.arrayBaseOffset(int[].class); // large(ish) segments/buffers with same content, 0, for mismatch, non-multiple-of-8 sized static final int SIZE_WITH_TAIL = (1024 * 1024) + 7; diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/LoopOverNonConstantHeap.java b/test/micro/org/openjdk/bench/java/lang/foreign/LoopOverNonConstantHeap.java index baaa19097188b..0605db076ca71 100644 --- a/test/micro/org/openjdk/bench/java/lang/foreign/LoopOverNonConstantHeap.java +++ b/test/micro/org/openjdk/bench/java/lang/foreign/LoopOverNonConstantHeap.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -57,8 +57,8 @@ public class LoopOverNonConstantHeap extends JavaLayouts { static final int ELEM_SIZE = 1_000_000; static final int CARRIER_SIZE = (int)JAVA_INT.byteSize(); static final int ALLOC_SIZE = ELEM_SIZE * CARRIER_SIZE; - static final int UNSAFE_BYTE_BASE = unsafe.arrayBaseOffset(byte[].class); - static final int UNSAFE_INT_BASE = unsafe.arrayBaseOffset(int[].class); + static final long UNSAFE_BYTE_BASE = unsafe.arrayBaseOffset(byte[].class); + static final long UNSAFE_INT_BASE = unsafe.arrayBaseOffset(int[].class); MemorySegment segment, alignedSegment; byte[] base; diff --git a/test/micro/org/openjdk/bench/java/lang/foreign/xor/GetArrayUnsafeXorOpImpl.java b/test/micro/org/openjdk/bench/java/lang/foreign/xor/GetArrayUnsafeXorOpImpl.java index 0b29f925c7c30..59079dc09d1a7 100644 --- a/test/micro/org/openjdk/bench/java/lang/foreign/xor/GetArrayUnsafeXorOpImpl.java +++ b/test/micro/org/openjdk/bench/java/lang/foreign/xor/GetArrayUnsafeXorOpImpl.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ public class GetArrayUnsafeXorOpImpl implements XorOp { static final Unsafe UNSAFE = Utils.unsafe; - static final int BYTE_ARR_OFFSET = Utils.unsafe.arrayBaseOffset(byte[].class); + static final long BYTE_ARR_OFFSET = Utils.unsafe.arrayBaseOffset(byte[].class); static { System.loadLibrary("jnitest"); diff --git a/test/micro/org/openjdk/bench/vm/compiler/FluidSBBench.java b/test/micro/org/openjdk/bench/vm/compiler/FluidSBBench.java deleted file mode 100644 index 794ff768678ab..0000000000000 --- a/test/micro/org/openjdk/bench/vm/compiler/FluidSBBench.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. - * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package org.openjdk.bench.vm.compiler; - -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Scope; -import java.util.concurrent.TimeUnit; - -@Warmup(iterations = 3, time = 300, timeUnit = TimeUnit.MILLISECONDS) -@Measurement(iterations = 3, time = 300, timeUnit = TimeUnit.MILLISECONDS) -@Fork(value = 1, jvmArgsAppend = {"-XX:+UseParallelGC", "-Xmx1g", "-Xms1g"}) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Thread) -public class FluidSBBench { - static final String PREFIX = "a"; - String foo = "aaaaa aaaaa aaaaa aaaaa aaaaa"; - - @Benchmark - public String fluid() { - return new StringBuilder().append(PREFIX).append(foo).toString(); - } - - @Benchmark - public String nonFluid() { - final StringBuilder sb = new StringBuilder(); - sb.append(PREFIX); - sb.append(foo); - return sb.toString(); - } -}