diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 83e1e9a4e8..22b381ece7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -116,21 +116,21 @@ jobs: run: curl -fsL https://github.com/google/ko/releases/download/v0.8.1/ko_0.8.1_Linux_x86_64.tar.gz | sudo tar xzf - -C /usr/local/bin ko - name: Install Shipwright Build run: | - make install-operator-kind - kubectl -n build-operator rollout status deployment build-operator --timeout=1m || true + make install-controller-kind + kubectl -n shipwright-build rollout status deployment shipwright-build-controller --timeout=1m || true - name: Test - run: TEST_E2E_OPERATOR=managed_outside TEST_NAMESPACE=build-operator TEST_IMAGE_REPO=registry.registry.svc.cluster.local:32222/shipwright-io/build-e2e make test-e2e - - name: Build operator logs + run: TEST_E2E_OPERATOR=managed_outside TEST_NAMESPACE=shipwright-build TEST_IMAGE_REPO=registry.registry.svc.cluster.local:32222/shipwright-io/build-e2e make test-e2e + - name: Build controller logs if: ${{ failure() }} run: | echo "# Pods:" - kubectl -n build-operator get pod + kubectl -n shipwright-build get pod - PODS=$(kubectl -n build-operator get pod -o json) + PODS=$(kubectl -n shipwright-build get pod -o json) - POD_NAME=$(echo "${PODS}" | jq -r '.items[] | select(.metadata.name | startswith("build-operator-")) | .metadata.name') + POD_NAME=$(echo "${PODS}" | jq -r '.items[] | select(.metadata.name | startswith("shipwright-build-controller-")) | .metadata.name') RESTART_COUNT=$(echo "${PODS}" | jq -r ".items[] | select(.metadata.name == \"${POD_NAME}\") | .status.containerStatuses[0].restartCount") if [ "${RESTART_COUNT}" != "0" ]; then echo "# Previous logs:" - kubectl -n build-operator logs "${POD_NAME}" --previous || true + kubectl -n shipwright-build logs "${POD_NAME}" --previous || true fi echo "# Logs:" - kubectl -n build-operator logs "${POD_NAME}" + kubectl -n shipwright-build logs "${POD_NAME}" diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 9a1e444c19..2286bf4b68 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -94,7 +94,7 @@ You must install these tools: ## Environment Setup -To run your operator, you'll need to set these environment variables (we recommend adding them to your `.bashrc`): +To run your controller, you'll need to set these environment variables (we recommend adding them to your `.bashrc`): 1. `GOPATH`: If you don't have one, simply pick a directory and add `export GOPATH=...` @@ -118,7 +118,7 @@ Note: This is roughly equivalent to [`docker login`](https://docs.docker.com/eng ## Install Shipwright Build -The following set of steps highlight how to deploy a Build operator pod into an existing Kubernetes cluster. +The following set of steps highlights how to deploy a Build controller pod into an existing Kubernetes cluster. 1. Target your Kubernetes cluster and install the Shipwright Build. Run this from the root of the source repo: @@ -130,17 +130,17 @@ The following set of steps highlight how to deploy a Build operator pod into an image registry you push to, or `kind.local` if you're using [KinD](https://kind.sigs.k8s.io). -1. Build and deploy the operator from source, from within the root of the repo: +1. Build and deploy the controller from source, from within the root of the repo: ```sh ko apply -P -R -f deploy/ ``` -The above steps give you a running Build operator that executes the code from your current branch. +The above steps give you a running Build controller that executes the code from your current branch.
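As a quick check that the renamed controller actually came up after `ko apply`, the deployment status and pod labels can be inspected. This is a minimal sketch, assuming the manifests under `deploy/` were applied unchanged, so the namespace is `shipwright-build`, the deployment is `shipwright-build-controller`, and the pods carry the `name=shipwright-build` label from `500-controller.yaml`:

```sh
# Wait until the controller deployment reports all replicas ready
kubectl -n shipwright-build rollout status deployment shipwright-build-controller --timeout=2m

# List the controller pods via the label set in the pod template
kubectl -n shipwright-build get pods -l name=shipwright-build
```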
-### Redeploy operator +### Redeploy controller -As you make changes to the code, you can redeploy your operator with: +As you make changes to the code, you can redeploy your controller with: ```sh ko apply -P -R -f deploy/ ``` @@ -156,9 +156,9 @@ You can clean up everything with: ### Accessing logs -To look at the operator logs, run: +To look at the controller logs, run: ```sh -kubectl -n build-operator logs $(kubectl -n build-operator get pods -l name=build-operator -o name) +kubectl -n shipwright-build logs $(kubectl -n shipwright-build get pods -l name=shipwright-build -o name) ``` diff --git a/HACK.md b/HACK.md index e0f37170da..ff50235c9a 100644 --- a/HACK.md +++ b/HACK.md @@ -4,7 +4,7 @@ Copyright The Shipwright Contributors SPDX-License-Identifier: Apache-2.0 --> -# Running the Operator +# Running the Controller Assuming you are logged in to an OpenShift/Kubernetes cluster, run @@ -33,7 +33,7 @@ Or oc policy add-role-to-user system:image-builder pipeline ``` -In the near future, the above would be setup by the operator. +In the near future, the above will be set up by the controller. ## Building it locally diff --git a/Makefile b/Makefile index 4ce1c1423c..12b62edff0 100644 --- a/Makefile +++ b/Makefile @@ -2,8 +2,8 @@ SHELL := /bin/bash # output directory, where all artifacts will be created and managed OUTPUT_DIR ?= build/_output -# relative path to operator binary -OPERATOR = $(OUTPUT_DIR)/bin/build-operator +# relative path to controller binary +OPERATOR = $(OUTPUT_DIR)/bin/shipwright-build-controller # golang cache directory path GOCACHE ?= $(shell echo ${PWD})/$(OUTPUT_DIR)/gocache @@ -27,7 +27,7 @@ OPERATOR_SDK_EXTRA_ARGS ?= --debug # test namespace name TEST_NAMESPACE ?= default -# CI: tekton pipelines operator version +# CI: tekton pipelines controller version TEKTON_VERSION ?= v0.20.1 # CI: operator-sdk version SDK_VERSION ?= v0.18.2 @@ -231,7 +231,7 @@ test-e2e-plain: ginkgo TEST_E2E_VERIFY_TEKTONOBJECTS=${TEST_E2E_VERIFY_TEKTONOBJECTS} \ $(GINKGO) ${TEST_E2E_FLAGS} test/e2e -.PHONY: install install-apis install-operator install-strategies +.PHONY: install install-apis install-controller install-strategies install: KO_DOCKER_REPO="$(IMAGE_HOST)/$(IMAGE)" GOFLAGS="$(GO_FLAGS)" ko apply --bare -R -f deploy/ @@ -244,21 +244,21 @@ install-apis: # Wait for the CRD type to be established; this can take a second or two.
kubectl wait --timeout=10s --for condition=established crd/clusterbuildstrategies.build.dev -install-operator: install-apis +install-controller: install-apis KO_DOCKER_REPO="$(IMAGE_HOST)/$(IMAGE)" GOFLAGS="$(GO_FLAGS)" ko apply --bare -f deploy/ -install-operator-kind: install-apis +install-controller-kind: install-apis KO_DOCKER_REPO=kind.local GOFLAGS="$(GO_FLAGS)" ko apply -f deploy/ install-strategies: install-apis kubectl apply -R -f samples/buildstrategy/ local: vendor install-strategies - OPERATOR_NAME=build-operator \ + OPERATOR_NAME=shipwright-build-controller \ operator-sdk run local --operator-flags="$(ZAP_FLAGS)" local-plain: vendor - OPERATOR_NAME=build-operator \ + OPERATOR_NAME=shipwright-build-controller \ operator-sdk run local --operator-flags="$(ZAP_FLAGS)" clean: diff --git a/build/Dockerfile b/build/Dockerfile index a405ef5987..0c1e4ea545 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -4,12 +4,12 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal:latest -ENV OPERATOR=/usr/local/bin/build-operator \ +ENV OPERATOR=/usr/local/bin/shipwright-build-controller \ USER_UID=1001 \ - USER_NAME=build-operator + USER_NAME=shipwright-build-controller -# install operator binary -COPY build/_output/bin/build-operator ${OPERATOR} +# install controller binary +COPY build/_output/bin/shipwright-build-controller ${OPERATOR} COPY build/bin /usr/local/bin RUN /usr/local/bin/user_setup diff --git a/build/bin/entrypoint b/build/bin/entrypoint index 9e36bb5abe..8506f0418f 100755 --- a/build/bin/entrypoint +++ b/build/bin/entrypoint @@ -10,7 +10,7 @@ if ! whoami &>/dev/null; then if [ -w /etc/passwd ]; then - echo "${USER_NAME:-build-operator}:x:$(id -u):$(id -g):${USER_NAME:-build-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd + echo "${USER_NAME:-shipwright-build-controller}:x:$(id -u):$(id -g):${USER_NAME:-shipwright-build-controller} user:${HOME}:/sbin/nologin" >> /etc/passwd fi fi diff --git a/cmd/manager/main.go b/cmd/manager/main.go deleted file mode 100644 index 3191e93f58..0000000000 --- a/cmd/manager/main.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright The Shipwright Contributors -// -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "context" - "flag" - "fmt" - "os" - "runtime" - - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - sdkVersion "github.com/operator-framework/operator-sdk/version" - "github.com/spf13/pflag" - - _ "k8s.io/client-go/plugin/pkg/client/auth" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - - buildconfig "github.com/shipwright-io/build/pkg/config" - "github.com/shipwright-io/build/pkg/controller" - "github.com/shipwright-io/build/pkg/controller/ready" - "github.com/shipwright-io/build/pkg/ctxlog" - buildMetrics "github.com/shipwright-io/build/pkg/metrics" - "github.com/shipwright-io/build/version" -) - -// Change below variables to serve metrics on different host or port. -var ( - metricsHost = "0.0.0.0" - metricsPort int32 = 8383 -) - -func printVersion(ctx context.Context) { - ctxlog.Info(ctx, fmt.Sprintf("Operator Version: %s", version.Version)) - ctxlog.Info(ctx, fmt.Sprintf("Go Version: %s", runtime.Version())) - ctxlog.Info(ctx, fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) - ctxlog.Info(ctx, fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) -} - -func main() { - // Add the zap logger flag set to the CLI. 
The flag set must - // be added before calling pflag.Parse(). - pflag.CommandLine.AddGoFlagSet(ctxlog.CustomZapFlagSet()) - - // Add flags registered by imported packages (e.g. glog and - // controller-runtime) - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - - pflag.Parse() - - // Use a zap logr.Logger implementation. If none of the zap - // flags are configured (or if the zap flag set is not being - // used), this defaults to a production zap logger. - // - // The logger instantiated here can be changed to any logger - // implementing the logr.Logger interface. This logger will - // be propagated through the whole operator, generating - // uniform and structured logs. - - l := ctxlog.NewLogger("build") - - ctx := ctxlog.NewParentContext(l) - - printVersion(ctx) - - // Get a config to talk to the apiserver - cfg, err := config.GetConfig() - if err != nil { - ctxlog.Error(ctx, err, "") - os.Exit(1) - } - - r := ready.NewFileReady("/tmp/shipwright-build-ready") - err = r.Set() - if err != nil { - ctxlog.Error(ctx, err, "Checking for /tmp/shipwright-build-ready failed") - os.Exit(1) - } - defer r.Unset() - - buildCfg := buildconfig.NewDefaultConfig() - if err := buildCfg.SetConfigFromEnv(); err != nil { - ctxlog.Error(ctx, err, "") - os.Exit(1) - } - - mgr, err := controller.NewManager(ctx, buildCfg, cfg, manager.Options{ - LeaderElection: true, - LeaderElectionID: "build-operator-lock", - LeaderElectionNamespace: buildCfg.ManagerOptions.LeaderElectionNamespace, - LeaseDuration: buildCfg.ManagerOptions.LeaseDuration, - RenewDeadline: buildCfg.ManagerOptions.RenewDeadline, - RetryPeriod: buildCfg.ManagerOptions.RetryPeriod, - Namespace: "", - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - }) - if err != nil { - ctxlog.Error(ctx, err, "") - os.Exit(1) - } - - buildMetrics.InitPrometheus(buildCfg) - - // Add optionally configured extra handlers to metrics endpoint - for path, handler := range buildMetrics.ExtraHandlers() { - ctxlog.Info(ctx, "Adding metrics extra handler path", "path", path) - if err := mgr.AddMetricsExtraHandler(path, handler); err != nil { - ctxlog.Error(ctx, err, "") - os.Exit(2) - } - } - - // Start the Cmd - ctxlog.Info(ctx, "Starting the Cmd.") - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - ctxlog.Error(ctx, err, "Manager exited non-zero") - os.Exit(1) - } -} diff --git a/deploy/namespace.yaml b/deploy/100-namespace.yaml similarity index 62% rename from deploy/namespace.yaml rename to deploy/100-namespace.yaml index 955e244a24..73be9e88b1 100644 --- a/deploy/namespace.yaml +++ b/deploy/100-namespace.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: build-operator + name: shipwright-build diff --git a/deploy/role.yaml b/deploy/200-role.yaml similarity index 95% rename from deploy/role.yaml rename to deploy/200-role.yaml index d39c3b4667..e33b77ecb8 100644 --- a/deploy/role.yaml +++ b/deploy/200-role.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: build-operator + name: shipwright-build-controller rules: - apiGroups: - "" @@ -48,7 +48,7 @@ rules: - apiGroups: - apps resourceNames: - - build-operator + - shipwright-build resources: - deployments/finalizers verbs: diff --git a/deploy/role_binding.yaml b/deploy/300-rolebinding.yaml similarity index 56% rename from deploy/role_binding.yaml rename to deploy/300-rolebinding.yaml index b498d1629b..05663dd173 100644 --- a/deploy/role_binding.yaml +++ b/deploy/300-rolebinding.yaml @@ -1,12 +1,12 @@ kind: 
ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: build-operator + name: shipwright-build-controller subjects: - kind: ServiceAccount - name: build-operator - namespace: build-operator + name: shipwright-build-controller + namespace: shipwright-build roleRef: kind: ClusterRole - name: build-operator + name: shipwright-build-controller apiGroup: rbac.authorization.k8s.io diff --git a/deploy/400-serviceaccount.yaml b/deploy/400-serviceaccount.yaml new file mode 100644 index 0000000000..77f4bc8798 --- /dev/null +++ b/deploy/400-serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: shipwright-build-controller + namespace: shipwright-build diff --git a/deploy/operator.yaml b/deploy/500-controller.yaml similarity index 74% rename from deploy/operator.yaml rename to deploy/500-controller.yaml index 96f175d802..1e34a019f1 100644 --- a/deploy/operator.yaml +++ b/deploy/500-controller.yaml @@ -1,28 +1,28 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: build-operator - namespace: build-operator + name: shipwright-build-controller + namespace: shipwright-build spec: replicas: 1 selector: matchLabels: - name: build-operator + name: shipwright-build template: metadata: labels: - name: build-operator + name: shipwright-build spec: - serviceAccountName: build-operator + serviceAccountName: shipwright-build-controller containers: - - name: build-operator + - name: shipwright-build image: ko://github.com/shipwright-io/build/cmd/manager env: - name: WATCH_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - - name: BUILD_OPERATOR_LEADER_ELECTION_NAMESPACE + - name: BUILD_CONTROLLER_LEADER_ELECTION_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace @@ -30,8 +30,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - - name: OPERATOR_NAME - value: "build-operator" + - name: CONTROLLER_NAME + value: "shipwright-build" livenessProbe: exec: command: diff --git a/deploy/service_account.yaml b/deploy/service_account.yaml deleted file mode 100644 index b8e9691c64..0000000000 --- a/deploy/service_account.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: build-operator - namespace: build-operator diff --git a/docs/build.md b/docs/build.md index bf1b7e58bf..0e6a56100e 100644 --- a/docs/build.md +++ b/docs/build.md @@ -301,7 +301,7 @@ Please consider the description of the attributes under `.spec.runtime`: > Specifying the runtime section will cause a `BuildRun` to push `spec.output.image` twice. First, the image produced by chosen `BuildStrategy` is pushed, and next it gets reused to construct the runtime-image, which is pushed again, overwriting `BuildStrategy` outcome. > Be aware, specially in situations where the image push action triggers automation steps. Since the same tag will be reused, you might need to take this in consideration when using runtime-images. -Under the cover, the runtime image will be an additional step in the generated Task spec of the TaskRun. It uses [Kaniko](https://github.com/GoogleContainerTools/kaniko) to run a container build using the `gcr.io/kaniko-project/executor:v1.5.1` image. You can overwrite this image by adding the environment variable `KANIKO_CONTAINER_IMAGE` to the [build operator deployment](../deploy/operator.yaml). +Under the cover, the runtime image will be an additional step in the generated Task spec of the TaskRun. 
It uses [Kaniko](https://github.com/GoogleContainerTools/kaniko) to run a container build using the `gcr.io/kaniko-project/executor:v1.5.1` image. You can override this image by adding the environment variable `KANIKO_CONTAINER_IMAGE` to the [build controller deployment](../deploy/500-controller.yaml). ## BuildRun deletion diff --git a/docs/buildstrategies.md b/docs/buildstrategies.md index 09d0048c78..923135f317 100644 --- a/docs/buildstrategies.md +++ b/docs/buildstrategies.md @@ -286,7 +286,7 @@ spec: ### How does Tekton Pipelines handle resources -The **Build** operator relies on the Tekton [pipeline controller](https://github.com/tektoncd/pipeline) to schedule the `pods` that execute the above strategy steps. In a nutshell, the **Build** operator creates on run-time a Tekton **TaskRun**, and the **TaskRun** generates a new pod in the particular namespace. In order to build an image, the pod executes all the strategy steps one-by-one. +The **Build** controller relies on the Tekton [pipeline controller](https://github.com/tektoncd/pipeline) to schedule the `pods` that execute the above strategy steps. In a nutshell, the **Build** controller creates a Tekton **TaskRun** at runtime, and the **TaskRun** generates a new pod in the particular namespace. In order to build an image, the pod executes all the strategy steps one-by-one. Tekton manage each step resources **request** in a very particular way, see the [docs](https://github.com/tektoncd/pipeline/blob/master/docs/tasks.md#defining-steps). From this document, it mentions the following: diff --git a/docs/configuration.md b/docs/configuration.md index 07bcda8c74..970ef557e7 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -6,7 +6,7 @@ SPDX-License-Identifier: Apache-2.0 # Configuration -The `build-operator` is installed into Kubernetes with reasonable defaults. However, there are some settings that can be overridden using environment variables in [`operator.yaml`](../deploy/operator.yaml). +The controller is installed into Kubernetes with reasonable defaults. However, there are some settings that can be overridden using environment variables in [`500-controller.yaml`](../deploy/500-controller.yaml). The following environment variables are available: @@ -14,7 +14,7 @@ | --- | --- | | `CTX_TIMEOUT` | Override the default context timeout used for all Custom Resource Definition reconciliation operations. | | `KANIKO_CONTAINER_IMAGE` | Specify the Kaniko container image to be used for the runtime image build instead of the default, for example `gcr.io/kaniko-project/executor:v1.5.1`. | -| `BUILD_OPERATOR_LEADER_ELECTION_NAMESPACE` | Set the namespace to be used to store the `build-operator` lock, by default it is in the same namespace as the operator itself. | -| `BUILD_OPERATOR_LEASE_DURATION` | Override the `LeaseDuration`, which is the duration that non-leader candidates will wait to force acquire leadership. | -| `BUILD_OPERATOR_RENEW_DEADLINE` | Override the `RenewDeadline`, which is the duration that the acting master will retry refreshing leadership before giving up. | -| `BUILD_OPERATOR_RETRY_PERIOD` | Override the `RetryPeriod`, which is the duration the LeaderElector clients should wait between tries of actions. | +| `BUILD_CONTROLLER_LEADER_ELECTION_NAMESPACE` | Set the namespace used to store the `shipwright-build-controller` lock; by default, it is the same namespace as the controller itself.
| +| `BUILD_CONTROLLER_LEASE_DURATION` | Override the `LeaseDuration`, which is the duration that non-leader candidates will wait to force acquire leadership. | +| `BUILD_CONTROLLER_RENEW_DEADLINE` | Override the `RenewDeadline`, which is the duration that the acting master will retry refreshing leadership before giving up. | +| `BUILD_CONTROLLER_RETRY_PERIOD` | Override the `RetryPeriod`, which is the duration the LeaderElector clients should wait between tries of actions. | diff --git a/docs/development/authentication.md b/docs/development/authentication.md index 8aef9e9d65..2d38f348fb 100644 --- a/docs/development/authentication.md +++ b/docs/development/authentication.md @@ -6,7 +6,7 @@ SPDX-License-Identifier: Apache-2.0 # Understanding authentication at runtime -The following document provides an introduction around the different authentication methods that can take place during an image build when using the Build operator. +The following document provides an introduction to the different authentication methods that can take place during an image build when using the Build controller. - [Overview](#overview) - [Build Secrets Annotation](#build-secrets-annotation) diff --git a/docs/development/local_development.md b/docs/development/local_development.md index c34116e253..684933bc42 100644 --- a/docs/development/local_development.md +++ b/docs/development/local_development.md @@ -6,16 +6,16 @@ SPDX-License-Identifier: Apache-2.0 # Running on development mode -The following document highlights how to deploy a Build operator locally for running on development mode. +The following document highlights how to run a Build controller locally in development mode. -**Before generating an instance of the Build operator, ensure the following:** +**Before running an instance of the Build controller, ensure the following:** - Target your Kubernetes cluster. We recommend the usage of KinD for development, which you can launch via our [install-kind.sh](/hack/install-kind.sh) script. - On the cluster, ensure the Tekton controllers are running. You can use our Tekton installation script in [install-tekton.sh](/hack/install-tekton.sh) --- -Once the code have been modified, you can generate an instance of the Build operator running locally to validate your changes. For running the Build operator locally via the `local` target: +Once the code has been modified, you can run a local instance of the Build controller to validate your changes. To run the Build controller locally, use the `local` target: ```sh pushd $GOPATH/src/github.com/shipwright-io/build @@ -23,4 +23,4 @@ pushd $GOPATH/src/github.com/shipwright-io/build popd ``` -_Note_: The above target will uninstall/install all related CRDs and start an instance of the operator via the `operator-sdk` binary. All existing CRDs instances will be deleted. +_Note_: The above target will uninstall/install all related CRDs and start an instance of the controller via the `operator-sdk` binary. All existing CRD instances will be deleted. diff --git a/docs/development/testing.md b/docs/development/testing.md index 62ead6c55c..3debf0ac3a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -87,7 +87,7 @@ Integration tests are designed based on the following: - All significant features should have an integration test. - They require to have access to a Kubernetes cluster. -- Each test generates its own instance of the build operator, namespace and resources.
+- Each test generates its own instance of the build controller, namespace and resources. - After test are executed, all generated resources for the particular test are removed. - They test all the interactions between components that have a relationship. - They do not test an e2e flow. diff --git a/docs/metrics.md b/docs/metrics.md index 4c3d5d473c..4790d9c331 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -8,7 +8,7 @@ SPDX-License-Identifier: Apache-2.0 The Build component exposes several metrics to help you monitor the health and behavior of your build resources. -Following build metrics are exposed at service `build-operator-metrics` on port `8383`. +The following build metrics are exposed on port `8383`. | Name | Type | Description | Labels | Status | |:-----------------------------------------------------|:----------|:--------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------| @@ -34,14 +34,14 @@ Environment variables can be set to use custom buckets for the histogram metrics | `build_buildrun_taskrun_rampup_duration_seconds` | `PROMETHEUS_BR_RAMPUP_DUR_BUCKETS` | `0,1,2,3,4,5,6,7,8,9,10` | | `build_buildrun_taskrun_pod_rampup_duration_seconds` | `PROMETHEUS_BR_RAMPUP_DUR_BUCKETS` | `0,1,2,3,4,5,6,7,8,9,10` | -The values have to be a comma-separated list of numbers. You need to set the environment variable for the build operator for your customization to become active. When running locally, set the variable right before starting the operator: +The values must be a comma-separated list of numbers. You need to set the environment variable for the build controller for your customization to become active. When running locally, set the variable right before starting the controller: ```bash export PROMETHEUS_BR_COMP_DUR_BUCKETS=30,60,90,120,180,240,300,360,420,480 make local ``` -When you deploy the build operator in a Kubernetes cluster, you need to extend the `spec.containers[0].spec.env` section of the sample deployment file, [operator.yaml](../deploy/operator.yaml). Add an additional entry: +When you deploy the build controller in a Kubernetes cluster, you need to extend the `spec.template.spec.containers[0].env` section of the sample deployment file, [500-controller.yaml](../deploy/500-controller.yaml). Add an additional entry: ```yaml [...] @@ -74,7 +74,7 @@ export PROMETHEUS_ENABLED_LABELS=buildstrategy,namespace,build make local ``` -When you deploy the build operator in a Kubernetes cluster, you need to extend the `spec.containers[0].spec.env` section of the sample deployment file, [operator.yaml](../deploy/operator.yaml). Add an additional entry: +When you deploy the build controller in a Kubernetes cluster, you need to extend the `spec.template.spec.containers[0].env` section of the sample deployment file, [500-controller.yaml](../deploy/500-controller.yaml). Add an additional entry: ```yaml [...] diff --git a/docs/profiling.md b/docs/profiling.md index f26aca6c2b..9be18d3213 100644 --- a/docs/profiling.md +++ b/docs/profiling.md @@ -6,24 +6,24 @@ SPDX-License-Identifier: Apache-2.0 # Build Controller Profiling -The build operator supports a `pprof` profiling mode, which is omitted from the binary by default. To use the profiling, use the operator image that was built with `pprof` enabled. +The build controller supports a `pprof` profiling mode, which is omitted from the binary by default.
To use profiling, use the controller image that was built with `pprof` enabled. -## Enable `pprof` in the build operator +## Enable `pprof` in the build controller -In the Kubernetes cluster, edit the `build-operator` deployment to use the container tag with the `debug` suffix. +In the Kubernetes cluster, edit the `shipwright-build-controller` deployment to use the container tag with the `debug` suffix. ```sh kubectl --namespace <namespace> set image \ - deployment/build-operator \ - build-operator="$(kubectl --namespace <namespace> get deployment build-operator --output jsonpath='{.spec.template.spec.containers[].image}')-debug" + deployment/shipwright-build-controller \ + shipwright-build-controller="$(kubectl --namespace <namespace> get deployment shipwright-build-controller --output jsonpath='{.spec.template.spec.containers[].image}')-debug" ``` -## Connect `go pprof` to build operator +## Connect `go pprof` to build controller -Depending on the respective setup, there could be multiple build operator pods for high availability reasons. In this case, you have to look-up the current leader first. The following command can be used to verify the currently active leader: +Depending on the respective setup, there could be multiple build controller pods for high availability reasons. In this case, you have to look up the current leader first. The following command can be used to verify the currently active leader: ```sh -kubectl --namespace <namespace> get configmap build-operator-lock --output json \ +kubectl --namespace <namespace> get configmap shipwright-build-controller-lock --output json \ | jq --raw-output '.metadata.annotations["control-plane.alpha.kubernetes.io/leader"]' \ | jq --raw-output .holderIdentity ``` @@ -31,7 +31,7 @@ kubectl --namespace <namespace> get configmap build-operator-lock --output json The `pprof` endpoint is not exposed in the cluster and can only be used from inside the container. Therefore, set-up port-forwarding to make the `pprof` port available locally. ```sh -kubectl --namespace <namespace> port-forward <build-operator-pod> 8383:8383 +kubectl --namespace <namespace> port-forward <shipwright-build-controller-pod> 8383:8383 ``` Now, you can setup a local webserver to browse through the profiling data. diff --git a/hack/install-tekton.sh b/hack/install-tekton.sh index e7f86cfa76..f7b4febb96 100755 --- a/hack/install-tekton.sh +++ b/hack/install-tekton.sh @@ -5,8 +5,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# Installs Tekton Pipelines operator. -# +# Installs Tekton Pipelines.
set -eu diff --git a/pkg/config/config.go b/pkg/config/config.go index 0188d75535..b49cd13579 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -31,11 +31,11 @@ const ( prometheusEnabledLabelsEnvVar = "PROMETHEUS_ENABLED_LABELS" leaderElectionNamespaceDefault = "default" - leaderElectionNamespaceEnvVar = "BUILD_OPERATOR_LEADER_ELECTION_NAMESPACE" + leaderElectionNamespaceEnvVar = "BUILD_CONTROLLER_LEADER_ELECTION_NAMESPACE" - leaseDurationEnvVar = "BUILD_OPERATOR_LEASE_DURATION" - renewDeadlineEnvVar = "BUILD_OPERATOR_RENEW_DEADLINE" - retryPeriodEnvVar = "BUILD_OPERATOR_RETRY_PERIOD" + leaseDurationEnvVar = "BUILD_CONTROLLER_LEASE_DURATION" + renewDeadlineEnvVar = "BUILD_CONTROLLER_RENEW_DEADLINE" + retryPeriodEnvVar = "BUILD_CONTROLLER_RETRY_PERIOD" ) var ( diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 7deaf69be2..666e6ac2d5 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -57,17 +57,17 @@ var _ = Describe("Config", func() { }) It("should allow for an override of the operator leader election namespace using an environment variable", func() { - var overrides = map[string]string{"BUILD_OPERATOR_LEADER_ELECTION_NAMESPACE": "build-operator"} + var overrides = map[string]string{"BUILD_CONTROLLER_LEADER_ELECTION_NAMESPACE": "shipwright-build"} configWithEnvVariableOverrides(overrides, func(config *Config) { - Expect(config.ManagerOptions.LeaderElectionNamespace).To(Equal("build-operator")) + Expect(config.ManagerOptions.LeaderElectionNamespace).To(Equal("shipwright-build")) }) }) It("should allow for an override of the operator leader election times using environment variables", func() { var overrides = map[string]string{ - "BUILD_OPERATOR_LEASE_DURATION": "42s", - "BUILD_OPERATOR_RENEW_DEADLINE": "32s", - "BUILD_OPERATOR_RETRY_PERIOD": "10s", + "BUILD_CONTROLLER_LEASE_DURATION": "42s", + "BUILD_CONTROLLER_RENEW_DEADLINE": "32s", + "BUILD_CONTROLLER_RETRY_PERIOD": "10s", } configWithEnvVariableOverrides(overrides, func(config *Config) { diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index d2b1f52ed1..e1dc8392fd 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -105,7 +105,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { // TODO we currently have no codepath where this is relevant, but this namespace is the wrong one // it is the watch namespace, but needs to be the operator namespace namespace, - "build-operator", + "shipwright-build-controller", 1, operatorDeploymentRetryInterval, operatorDeploymentTimeout,
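The renamed settings from `pkg/config/config.go` are plain environment variables, so they can be exercised the same way the metrics docs describe: export them right before starting the controller. A short sketch, assuming the `make local` target from the Makefile above; the duration values are the ones used in `config_test.go`:

```sh
# Override the renamed leader-election settings for a local run
export BUILD_CONTROLLER_LEADER_ELECTION_NAMESPACE=shipwright-build
export BUILD_CONTROLLER_LEASE_DURATION=42s
export BUILD_CONTROLLER_RENEW_DEADLINE=32s
export BUILD_CONTROLLER_RETRY_PERIOD=10s
make local
```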