Skip to content

Commit

Permalink
Clean up optimized-oss.yaml
Browse files Browse the repository at this point in the history
Pull Request resolved: #8549

We don't need this now that we support log_softmax and gelu in OSS!
ghstack-source-id: 267452085
@exported-using-ghexport

Differential Revision: [D69475020](https://our.internmc.facebook.com/intern/diff/D69475020/)
  • Loading branch information
Github Executorch committed Feb 20, 2025
1 parent cf17b4f commit 33fcf11
Show file tree
Hide file tree
Showing 11 changed files with 26 additions and 154 deletions.
6 changes: 3 additions & 3 deletions build/cmake_deps.toml
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,9 @@ deps = [
"executorch",
]

[targets.optimized_native_cpu_ops_oss]
[targets.optimized_native_cpu_ops]
buck_targets = [
"//configurations:optimized_native_cpu_ops_oss",
"//configurations:optimized_native_cpu_ops",
]
filters = [
".cpp$",
Expand Down Expand Up @@ -437,6 +437,6 @@ deps = [
"portable_kernels",
"quantized_kernels",
"xnnpack_backend",
"optimized_native_cpu_ops_oss",
"optimized_native_cpu_ops",
]
# ---------------------------------- LLama end ----------------------------------
2 changes: 1 addition & 1 deletion configurations/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ include(${EXECUTORCH_ROOT}/build/Codegen.cmake)
if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED)
# Merge optimized and portable definitions, taking optimized where available.
merge_yaml(
FUNCTIONS_YAML ${EXECUTORCH_ROOT}/kernels/optimized/optimized-oss.yaml
FUNCTIONS_YAML ${EXECUTORCH_ROOT}/kernels/optimized/optimized.yaml
FALLBACK_YAML ${EXECUTORCH_ROOT}/kernels/portable/functions.yaml OUTPUT_DIR
${CMAKE_CURRENT_BINARY_DIR}
)
Expand Down
18 changes: 0 additions & 18 deletions configurations/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -50,21 +50,3 @@ def define_common_targets():
"@EXECUTORCH_CLIENTS",
],
)

# TODO(T183193812): delete this target after optimized-oss.yaml is gone
executorch_generated_lib(
name = "optimized_native_cpu_ops_oss",
deps = [
"//executorch/kernels/optimized:optimized_operators",
"//executorch/kernels/optimized:optimized_oplist",
"//executorch/kernels/portable:executorch_aten_ops",
"//executorch/kernels/portable:operators",
],
functions_yaml_target = "//executorch/kernels/optimized:optimized-oss.yaml",
fallback_yaml_target = "//executorch/kernels/portable:functions.yaml",
define_static_targets = True,
visibility = [
"//executorch/examples/...",
"@EXECUTORCH_CLIENTS",
],
)
5 changes: 1 addition & 4 deletions examples/models/llama/runner/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -3,17 +3,14 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
def _get_operator_lib(aten = False):
if aten:
return ["//executorch/kernels/aten:generated_lib"]
elif runtime.is_oss:
# TODO(T183193812): delete this path after optimized-oss.yaml is no more.
return ["//executorch/configurations:optimized_native_cpu_ops_oss", "//executorch/extension/llm/custom_ops:custom_ops"]
else:
return ["//executorch/configurations:optimized_native_cpu_ops", "//executorch/extension/llm/custom_ops:custom_ops"]

def get_qnn_dependency():
# buck build -c executorch.enable_qnn=true //executorch/examples/models/llama/runner:runner
# Check if QNN is enabled before including the dependency
if native.read_config("executorch", "enable_qnn", "false") == "true":
# //executorch/backends/qualcomm:qnn_executorch_backend doesn't work,
# //executorch/backends/qualcomm:qnn_executorch_backend doesn't work,
# likely because it is an empty library with dependencies only
return [
"//executorch/backends/qualcomm/runtime:runtime",
Expand Down
4 changes: 2 additions & 2 deletions kernels/optimized/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,12 @@ target_compile_options(cpublas PUBLIC ${_common_compile_options})

# Generate C++ bindings to register kernels into both PyTorch (for AOT) and
# Executorch (for runtime). Here select all ops in optimized.yaml
set(_yaml "${CMAKE_CURRENT_LIST_DIR}/optimized-oss.yaml")
set(_yaml "${CMAKE_CURRENT_LIST_DIR}/optimized.yaml")
gen_selected_ops(LIB_NAME "optimized_ops_lib" OPS_SCHEMA_YAML "${_yaml}")

generate_bindings_for_kernels(
LIB_NAME "optimized_ops_lib" FUNCTIONS_YAML
${CMAKE_CURRENT_SOURCE_DIR}/optimized-oss.yaml
${CMAKE_CURRENT_SOURCE_DIR}/optimized.yaml
ADD_EXCEPTION_BOUNDARY
)
message("Generated files ${gen_command_sources}")
Expand Down
8 changes: 3 additions & 5 deletions kernels/optimized/cpu/targets.bzl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "define_op_target", "is_op_disabled", "op_target")
load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "define_op_target", "op_target")

_OPTIMIZED_ATEN_OPS = (
op_target(
Expand Down Expand Up @@ -110,13 +110,11 @@ def define_common_targets():
TARGETS and BUCK files that call this function.
"""

enabled_ops = [op for op in _OPTIMIZED_ATEN_OPS if not is_op_disabled(op["name"])]

# Define build targets for all operators registered in the tables above.
for op in enabled_ops:
for op in _OPTIMIZED_ATEN_OPS:
define_op_target(**op)

aten_op_targets = [":{}".format(op["name"]) for op in enabled_ops]
aten_op_targets = [":{}".format(op["name"]) for op in _OPTIMIZED_ATEN_OPS]
all_op_targets = aten_op_targets

runtime.cxx_library(
Expand Down
6 changes: 1 addition & 5 deletions kernels/optimized/op_registration_util.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbsource//xplat/executorch/build:selects.bzl", "selects")
load(
"@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl",
"get_vec_preprocessor_flags",
"get_vec_deps",
"get_vec_preprocessor_flags",
)
load(
"@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl",
Expand Down Expand Up @@ -137,7 +137,3 @@ def define_op_target(name, compiler_flags, deps):
compiler_flags = compiler_flags,
deps = deps,
)

def is_op_disabled(name):
# All ops are enabled for internal builds.
return False
96 changes: 0 additions & 96 deletions kernels/optimized/optimized-oss.yaml

This file was deleted.

8 changes: 0 additions & 8 deletions kernels/optimized/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,6 @@ def define_common_targets(is_fbcode=False):
],
)

runtime.export_file(
name = "optimized-oss.yaml",
visibility = [
"//executorch/...",
"@EXECUTORCH_CLIENTS",
],
)

runtime.cxx_library(
name = "optimized_operators",
srcs = [],
Expand Down
4 changes: 1 addition & 3 deletions kernels/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,7 @@ set(_optimized_kernels_test_sources
"op_le_test.cpp"
"op_linear_test.cpp"
"op_log_softmax_test.cpp"
"op_mm_test.cpp"
"op_mul_test.cpp"
"op_native_layer_norm_test.cpp"
"op_neg_test.cpp"
Expand All @@ -278,9 +279,6 @@ set(_optimized_kernels_test_sources
${CMAKE_CURRENT_BINARY_DIR}/include/portable/executorch/kernels/test/supported_features.cpp
)

# We don't have sleef on OSS so we don't have log_softmax
list(REMOVE_ITEM _optimized_kernels_test_sources "op_log_softmax_test.cpp")

et_cxx_test(
optimized_kernels_test
SOURCES
Expand Down
23 changes: 14 additions & 9 deletions shim/xplat/executorch/kernels/optimized/op_registration_util.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,13 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbsource//xplat/executorch/build:selects.bzl", "selects")
load(
"@fbsource//xplat/executorch/kernels/optimized:lib_defs.bzl",
"get_vec_deps",
"get_vec_preprocessor_flags",
)
load(
"@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl",
"get_compiler_optimization_flags",
)

def op_target(name, deps = [], compiler_flags = []):
"""Registers an optimized implementation for an operator overload group.
Expand Down Expand Up @@ -94,12 +99,17 @@ def define_op_library(name, compiler_flags, deps):
"//executorch/kernels/test/...",
"@EXECUTORCH_CLIENTS",
],
# kernels often have helpers with no prototypes just disabling the warning here as the headers
# are codegend and linked in later
compiler_flags = ["-Wno-missing-prototypes"],
compiler_flags = [
# kernels often have helpers with no prototypes just disabling the warning here as the headers
# are codegend and linked in later
"-Wno-missing-prototypes",
# pragma unroll fails with -Os, don't need to warn us and
# fail Werror builds; see https://godbolt.org/z/zvf85vTsr
"-Wno-pass-failed",
] + get_compiler_optimization_flags(),
deps = [
"//executorch/runtime/kernel:kernel_includes",
] + augmented_deps,
] + augmented_deps + get_vec_deps(),
preprocessor_flags = get_vec_preprocessor_flags(),
# sleef needs to be added as a direct dependency of the operator target when building for Android,
# or a linker error may occur. Not sure why this happens; it seems that fbandroid_platform_deps of
Expand Down Expand Up @@ -134,8 +144,3 @@ def define_op_target(name, compiler_flags, deps):
compiler_flags = compiler_flags,
deps = deps,
)

def is_op_disabled(name):
# TODO (gjcomer) Enable ops with sleef dependency in OSS
disabled_ops = ["op_log_softmax"]
return name in disabled_ops

0 comments on commit 33fcf11

Please sign in to comment.