From 0f622a1db34331bb26b07e1a956b42a6851d08a1 Mon Sep 17 00:00:00 2001
From: aarohib <48060050+aarohib@users.noreply.github.com>
Date: Tue, 20 Jun 2023 09:03:04 -0700
Subject: [PATCH] Updates Dynamic Config reference (#2123)
* Fixes bad update that published an old version of the dynamic config reference.
* fixes header level
---
ASSEMBLY_REPORT.md | 18 +-
.../references/configuration.json | 2 +-
.../references/dynamic-configuration.json | 16 ++
.../concepts/what-is-a-workflow-execution.md | 2 +-
.../concepts/what-is-cluster-configuration.md | 2 +-
docs-src/references/dynamic-configuration.md | 2 +-
docs/concepts/clusters.md | 4 +-
docs/concepts/workers.md | 130 +++++------
docs/concepts/workflows.md | 2 +-
docs/dev-guide/golang/versioning.md | 45 ++--
docs/dev-guide/javalang/versioning.md | 44 ++--
docs/references/configuration.md | 2 +-
docs/references/dynamic-configuration.md | 217 +++++++++---------
13 files changed, 244 insertions(+), 242 deletions(-)
create mode 100644 assembly/guide-configs/references/dynamic-configuration.json
diff --git a/ASSEMBLY_REPORT.md b/ASSEMBLY_REPORT.md
index cf3f94a096..3cc2448d3d 100644
--- a/ASSEMBLY_REPORT.md
+++ b/ASSEMBLY_REPORT.md
@@ -1,14 +1,14 @@
# Docs Assembly Workflow report
-Last assembled: Friday June 16 2023 10:28:07 AM -0700
+Last assembled: Monday June 19 2023 12:15:33 PM -0700
-Assembly Workflow Id: docs-full-assembly-dail-macbook
+Assembly Workflow Id: docs-full-assembly
-86 guide configurations found.
+87 guide configurations found.
-1451 information nodes found.
+1461 information nodes found.
-1207 information nodes are attached to guides.
+1221 information nodes are attached to guides.
The "Link Magic" Activity transformed the following "information node" identifiers into site paths:
@@ -354,12 +354,12 @@ concepts/what-is-an-activity-task -> /workers#activity-task
concepts/what-is-cluster-configuration -> /clusters#dynamicconfiguration
+concepts/what-is-a-retry-policy -> /retry-policies#
+
concepts/what-is-a-workflow-execution -> /workflows#workflow-execution
concepts/what-is-a-workflow-execution-timeout -> /workflows#workflow-execution-timeout
-concepts/what-is-a-retry-policy -> /retry-policies#
-
concepts/what-is-a-workflow-task-timeout -> /workflows#workflow-task-timeout
concepts/what-is-an-activity-type -> /activities#activity-type
@@ -482,6 +482,8 @@ java/namespaces -> /dev-guide/java/features#namespaces
clusters/how-to-set-up-archival -> /cluster-deployment-guide#set-up-archival
+references/dynamic-configuration -> /references/dynamic-configuration#
+
concepts/what-is-a-default-data-converter -> #default-data-converter
go/custom-payload-conversion -> /dev-guide/go/features#custom-payload-conversion
@@ -930,6 +932,8 @@ java/how-to-get-the-result-of-a-workflow-execution-in-java -> #get-workflow-resu
concepts/what-is-worker-versioning -> /workers#worker-versioning
+go/testing -> /dev-guide/go/testing#replay
+
go/tracing -> /dev-guide/go/observability#tracing-and-context-propogation
go/logging -> /dev-guide/go/observability#logging
diff --git a/assembly/guide-configs/references/configuration.json b/assembly/guide-configs/references/configuration.json
index 073ff2c05f..3e3d94b70a 100644
--- a/assembly/guide-configs/references/configuration.json
+++ b/assembly/guide-configs/references/configuration.json
@@ -3,7 +3,7 @@
"id": "configuration",
"file_dir": "references",
"title": "Temporal Cluster configuration reference",
- "sidebar_label": "Cluster config",
+ "sidebar_label": "Cluster configuration",
"description": "Much of the behavior of a Temporal Cluster is configured using the `development.yaml` file.",
"toc_max_heading_level": 4,
"add_tabs_support": false,
diff --git a/assembly/guide-configs/references/dynamic-configuration.json b/assembly/guide-configs/references/dynamic-configuration.json
new file mode 100644
index 0000000000..50d4932dea
--- /dev/null
+++ b/assembly/guide-configs/references/dynamic-configuration.json
@@ -0,0 +1,16 @@
+{
+ "file_name": "dynamic-configuration.md",
+ "id": "dynamic-configuration",
+ "file_dir": "references",
+ "title": "Temporal Cluster dynamic configuration reference",
+ "sidebar_label": "Dynamic configuration",
+ "description": "Temporal Cluster provides dynamic configuration keys that you can update and apply to a running Cluster without restarting your services.",
+ "toc_max_heading_level": 4,
+ "add_tabs_support": false,
+ "sections": [
+ {
+ "type": "p",
+ "id": "references/dynamic-configuration"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/docs-src/concepts/what-is-a-workflow-execution.md b/docs-src/concepts/what-is-a-workflow-execution.md
index a3817cfd31..62e1985d95 100644
--- a/docs-src/concepts/what-is-a-workflow-execution.md
+++ b/docs-src/concepts/what-is-a-workflow-execution.md
@@ -152,8 +152,8 @@ To protect the system, Temporal enforces a maximum of 50,000 pending Activities,
Currently, there is no limit on the total number of Signals that a Workflow Execution can receive.
These limits are set with the following [dynamic configuration keys](https://github.com/temporalio/temporal/blob/master/service/history/configs/config.go):
-- `limit.numPendingChildExecutions.error`
- `limit.numPendingActivities.error`
+- `limit.numPendingChildExecutions.error`
- `limit.numPendingSignals.error`
- `limit.numPendingCancelRequests.error`
- `history.maximumSignalsPerExecution`
diff --git a/docs-src/concepts/what-is-cluster-configuration.md b/docs-src/concepts/what-is-cluster-configuration.md
index 50dc2f5fe6..7d36118e5f 100644
--- a/docs-src/concepts/what-is-cluster-configuration.md
+++ b/docs-src/concepts/what-is-cluster-configuration.md
@@ -45,7 +45,7 @@ All dynamic configuration keys provided by Temporal have default values that are
You can override the default values by setting different values for the keys in a YAML file and setting the [dynamic configuration client](/references/configuration#dynamicconfigclient) to poll this file for updates.
Setting dynamic configuration for your Cluster is optional.
-Setting overrides for some configuration keys upates the Cluster configuration immediately.
+Setting overrides for some configuration keys updates the Cluster configuration immediately.
However, for configuration fields that are checked at startup (such as thread pool size), you must restart the server for the changes to take effect.
Use dynamic configuration keys to fine-tune your self-deployed Cluster setup.
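+
+For example, a minimal sketch of the client setting in your Cluster configuration looks like the following (the file path and poll interval shown here are only illustrations):
+
+```yaml
+dynamicConfigClient:
+  filepath: "config/dynamicconfig/development.yaml" # YAML file that holds your dynamic configuration overrides.
+  pollInterval: "10s" # How often the running Cluster re-reads the file.
+```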
diff --git a/docs-src/references/dynamic-configuration.md b/docs-src/references/dynamic-configuration.md
index 59a69d34cb..88bce010aa 100644
--- a/docs-src/references/dynamic-configuration.md
+++ b/docs-src/references/dynamic-configuration.md
@@ -177,7 +177,7 @@ If the number of queries made to the Persistence store exceeds the dynamic confi
| `system.visibilityPersistenceMaxReadQPS` | Int | Maximum number of queries per second that the Visibility database can receive for read operations. | 9000 |
| `system.visibilityPersistenceMaxWriteQPS` | Int | Maximum number of queries per second that the Visibility database can receive for write operations. | 9000 |
-#### Activity and Workflow default policy setting
+### Activity and Workflow default policy setting
You can define default values for Activity and Workflow [Retry Policies](/concepts/what-is-a-retry-policy) at the Cluster level with the following dynamic configuration keys.
diff --git a/docs/concepts/clusters.md b/docs/concepts/clusters.md
index 2ba267974d..ddf223dbf4 100644
--- a/docs/concepts/clusters.md
+++ b/docs/concepts/clusters.md
@@ -308,12 +308,12 @@ All dynamic configuration keys provided by Temporal have default values that are
You can override the default values by setting different values for the keys in a YAML file and setting the dynamic configuration client
Temporal Cluster configuration reference
Much of the behavior of a Temporal Cluster is configured using the `development.yaml` file.
Learn more to poll this file for updates.
Setting dynamic configuration for your Cluster is optional.
-Setting overrides for some configuration keys upates the Cluster configuration immediately.
+Setting overrides for some configuration keys updates the Cluster configuration immediately.
However, for configuration fields that are checked at startup (such as thread pool size), you must restart the server for the changes to take effect.
Use dynamic configuration keys to fine-tune your self-deployed Cluster setup.
-For details on dynamic configuration keys, see [Dynamic configuration reference](/references/dynamic-configuration).
+For details on dynamic configuration keys, see Dynamic configuration reference
Dynamic configuration reference
Dynamic configuration key values can be set to override the default values in a Cluster configuration.
Learn more.
For dynamic configuration examples, see .
diff --git a/docs/concepts/workers.md b/docs/concepts/workers.md
index 26b4326c44..ff0ba332bd 100644
--- a/docs/concepts/workers.md
+++ b/docs/concepts/workers.md
@@ -325,7 +325,9 @@ It also includes features like concurrent session limitations and Worker failure
## Worker Versioning
-Worker Versioning simplifies the process of deploying changes to [Workflow Definitions](/workflows/#workflow-definition). It does this by allowing you to define sets of versions that are compatible with each other, and then assigning a Build ID to the code that defines a Worker. The Temporal Server uses the Build ID to determine which versions of a Workflow Definition a Worker can process.
+Worker Versioning simplifies the process of deploying changes to [Workflow Definitions](/workflows/#workflow-definition).
+It does this by letting you define sets of versions that are compatible with each other, and then assigning a Build ID to the code that defines a Worker.
+The Temporal Server uses the Build ID to determine which versions of a Workflow Definition a Worker can process.
We recommend that you read about Workflow Definitions before proceeding, because Workflow Versioning is largely concerned with helping to manage nondeterministic changes to those definitions.
@@ -333,40 +335,49 @@ Worker Versioning helps manage nondeterministic changes by providing a convenien
Accomplish this goal by assigning a Build ID (a free-form string) to the code that defines a Worker, and specifying which Build IDs are compatible with each other by updating the version sets associated with the Task Queue, stored by the Temporal Server.
-### When and why should you use Worker Versioning
+### When and why you should use Worker Versioning
-The main reason to use this feature is to deploy incompatible changes to short-lived [Workflows](/workflows). On Task Queues using this feature, the Workflow starter doesn't have to know about the introduction of new versions.
+The main reason to use this feature is to deploy incompatible changes to short-lived [Workflows](/workflows).
+On Task Queues using this feature, the Workflow starter doesn't have to know about the introduction of new versions.
The new code in the newly deployed Workers executes new [Workflow Executions](#workflow-execution), while only Workers with an appropriate version process old Workflow Executions.
#### Decommission old Workers
-You can decommission old Workers once you archive all open Workflows using their version, or if you have no need to Query closed Workflows, you can decommission them once no open Workflows remain at that version.
+You can decommission old Workers after you archive all open Workflows using their version.
+If you have no need to query closed Workflows, you can decommission them when no open Workflows remain at that version.
-For example, if you have a Workflow that completes within a day. A good strategy is to assign a new Build ID to every new Worker build and add it as the new overall default in the version sets.
+For example, if you have a Workflow that completes within a day, a good strategy is to assign a new Build ID to every new Worker build and add it as the new overall default in the version sets.
Because your Workflow completes in a day, you know that you won't need to keep older Workers running for more than a day after you deploy the new version (assuming availability).
-Removing old versions from the sets isn't necessary. Leaving them doesn't cause any harm.
+Removing old versions from the sets isn't necessary.
+Leaving them doesn't cause any harm.
-You can apply this technique to longer-lived Workflows too; however, you may need to run multiple Worker versions simultaneously while open Workflows complete.
+You can apply this technique to longer-lived Workflows too; however, you might need to run multiple Worker versions simultaneously while open Workflows complete.
#### Deploy code changes to Workers
-The feature also allows you to implement compatible changes to or prevent a buggy code path from executing on currently open Workflows. You can achieve this by adding a new version to an existing set and defining it as _compatible_ with an existing version, which shouldn't execute any future Workflow Tasks. Since the new version processes existing [Event Histories](/workflows/#event-history), it must adhere to the usual [deterministic constraints](/workflows/#deterministic-constraints), and you may need to use one of the [versioning APIs](/workflows/#workflow-versioning).
+The feature also lets you implement compatible changes to or prevent a buggy code path from executing on currently open Workflows.
+You can achieve this by adding a new version to an existing set and defining it as _compatible_ with an existing version, which shouldn't execute any future Workflow Tasks.
+Because the new version processes existing [Event Histories](/workflows/#event-history), it must adhere to the usual [deterministic constraints](/workflows/#deterministic-constraints), and you might need to use one of the [versioning APIs](/workflows/#workflow-versioning).
-Moreover, this feature enables you to make incompatible changes to Activity Definitions in conjunction with incompatible changes to Workflow Definitions that use those Activities. This functionality works because any Activity that a Workflow schedules on the same Task Queue only gets dispatched to Workers compatible with the Workflow that scheduled it.
-If you want to change an Activity Definition's type signature while creating a new incompatible Build ID for a Worker, you can do so without worrying about the Activity failing to execute on some other Worker with an incompatible definition. The same principle applies to Child Workflows.
+Moreover, this feature lets you make incompatible changes to Activity Definitions in conjunction with incompatible changes to Workflow Definitions that use those Activities.
+This functionality works because any Activity that a Workflow schedules on the same Task Queue gets dispatched only to Workers compatible with the Workflow that scheduled it.
+If you want to change an Activity Definition's type signature while creating a new incompatible Build ID for a Worker, you can do so without worrying about the Activity failing to execute on some other Worker with an incompatible definition.
+The same principle applies to Child Workflows.
:::tip
-Public facing Workflows on a versioned Task Queue shouldn't change their signature as it contradicts the purpose of Workflow-launching Clients remaining unaware of changes in the Workflow Definition. If you need to change a Workflow's signature, use a different Workflow Type or a completely new Task Queue.
+Public-facing Workflows on a versioned Task Queue shouldn't change their signatures because doing so contradicts the purpose of Workflow-launching Clients remaining unaware of changes in the Workflow Definition.
+If you need to change a Workflow's signature, use a different Workflow Type or a completely new Task Queue.
:::
:::note
-If you schedule an Activity or a Child Workflow on _a different_ Task Queue from the one the Workflow runs on, the system doesn't assign a specific version. This means if the target queue is versioned, they run on the latest default, and if its unversioned, they operate as they would have without this feature.
+If you schedule an Activity or a Child Workflow on _a different_ Task Queue from the one the Workflow runs on, the system doesn't assign a specific version.
+This means if the target queue is versioned, they run on the latest default, and if it's unversioned, they operate as they would have without this feature.
:::
@@ -374,11 +385,12 @@ If you schedule an Activity or a Child Workflow on _a different_ Task Queue from
By default, a versioned Task Queue's Continue-as-New function starts the continued Workflow on the same compatible set as the original Workflow.
-If you continue-as-new onto a different Task Queue, the system doesn't assign any particular version. You also have the option to specify that the continued Workflow should start using the Task Queue's latest default version.
+If you continue-as-new onto a different Task Queue, the system doesn't assign any particular version.
+You also have the option to specify that the continued Workflow should start using the Task Queue's latest default version.
### How to use Worker Versioning
-To use Worker Versioning:
+To use Worker Versioning, follow these steps:
1. Define Worker build-identifier version sets for the Task Queue.
You can use either the `temporal` CLI or your choice of SDK.
@@ -386,19 +398,17 @@ To use Worker Versioning:
#### Defining the version sets
-Whether you use `temporal` [CLI](/cli/) or an SDK, updating the version sets feels the same. You
-specify the Task Queue that you're targeting, the Build ID that you're adding (or
-promoting), whether it becomes the new default version, and any existing versions it should be
-considered compatible with.
+Whether you use [Temporal CLI](/cli/) or an SDK, updating the version sets feels the same.
+You specify the Task Queue that you're targeting, the Build ID that you're adding (or promoting), whether it becomes the new default version, and any existing versions it should be considered compatible with.
The rest of this section uses updates to one Task Queue's version sets as examples.
-By default, both Task Queues and Workers are in an unversioned state. [Unversioned Worker](#unversioned-workers) can poll
-unversioned Task Queues and receive tasks. To use this feature, both the Task Queue and the Worker
-have to have Build IDs associated with them.
+By default, both Task Queues and Workers are in an unversioned state.
+[Unversioned Workers](#unversioned-workers) can poll unversioned Task Queues and receive tasks.
+To use this feature, both the Task Queue and the Worker must be associated with Build IDs.
-If you run a Worker using versioning against a Task Queue that has not been set up to use versioning (or is missing that Worker's Build ID), it won't
-get any tasks. Likewise, a unversioned Worker polling a Task Queue with versioning won't work either.
+If you run a Worker using versioning against a Task Queue that has not been set up to use versioning (or is missing that Worker's Build ID), it won't get any tasks.
+Likewise, an unversioned Worker polling a Task Queue with versioning won't work either.
:::note Versions don't need to follow semver or any other semantic versioning scheme!
@@ -417,10 +427,9 @@ Your version sets now look like this:
All new Workflows started on the Task Queue have their first tasks assigned to version `1.0`.
Workers with their Build ID set to `1.0` receive these Tasks.
-If Workflows that don't have an assigned version are still running on the Task Queue, Workers
-without a version, take those tasks. So ensure that such Workers are still operational if any
-Workflows were open when you added the first version. If you deployed any Workers with a _different_
-version, those Workers receive no Tasks.
+If Workflows that don't have an assigned version are still running on the Task Queue, Workers without a version take those tasks.
+So ensure that such Workers are still operational if any Workflows were open when you added the first version.
+If you deployed any Workers with a _different_ version, those Workers receive no Tasks.
Now, imagine you need to change the Workflow for some reason.
@@ -435,19 +444,17 @@ Existing `1.0` Workflows keep generating tasks targeting `1.0`.
Each deployment of Workers receives their respective Tasks.
This same concept carries forward for each new incompatible version.
-Maybe you have a bug in `2.0`, and you want to make sure all open `2.0` Workflows switch to some new
-code as fast as possible. So, you'll add `2.1` to the sets, marking it as compatible with `2.0`. Now your
-sets look like this:
+Maybe you have a bug in `2.0`, and you want to make sure all open `2.0` Workflows switch to some new code as fast as possible.
+So, you add `2.1` to the sets, marking it as compatible with `2.0`.
+Now your sets look like this:
| set 1 | set 2 (default) |
| ------------- | --------------- |
| 1.0 (default) | 2.0 |
| | 2.1 (default) |
-All new Workflow Tasks that are generated for Workflows whose last Workflow Task completion was on
-version `2.0` are now assigned to version `2.1`. Because you specified that `2.1` is compatible with
-`2.0`, Temporal Server assumes that Workers with this version can process the existing Event
-Histories successfully.
+All new Workflow Tasks that are generated for Workflows whose last Workflow Task completion was on version `2.0` are now assigned to version `2.1`.
+Because you specified that `2.1` is compatible with `2.0`, Temporal Server assumes that Workers with this version can process the existing Event Histories successfully.
Continue with your normal development cycle, adding a `3.0` version.
Nothing new here:
@@ -458,9 +465,9 @@ Nothing new here:
| | 2.1 (default) | |
Now imagine that version `3.0` doesn't have an explicit bug, but something about the business logic
-is less than ideal. You are okay with existing `3.0` Workflows running to completion, but you want new
-Workflows to use the old `2.x` branch. This operation is supported by performing an update targeting
-`2.1` (or `2.0`) and setting it's set as the current default, which results in these sets:
+is less than ideal.
+You are okay with existing `3.0` Workflows running to completion, but you want new Workflows to use the old `2.x` branch.
+This operation is supported by performing an update targeting `2.1` (or `2.0`) and setting its set as the current default, which results in these sets:
| set 1 | set 3 | set 2 (default) |
| ------------- | ------------- | --------------- |
@@ -473,53 +480,45 @@ Now new Workflows start on `2.1`.
A request to change the sets can do one of the following:
-- Add a version to the sets as new the default version in a new overall-default compatible set.
+- Add a version to the sets as the new default version in a new overall-default compatible set.
- Add a version to an existing set that's compatible with an existing version.
- - Optionally making it the default for that set
+ - Optionally making it the default for that set.
- Optionally making that set the overall-default set.
- Promote a version within an existing set to become the default for that set.
- Promote a set to become the overall-default set.
-You can't explicitly delete versions. This helps you avoid the situation in which Workflows
-accidentally become stuck with no means of making progress because the version they're associated
-with no longer exists.
+You can't explicitly delete versions.
+This helps you avoid the situation in which Workflows accidentally become stuck with no means of making progress because the version they're associated with no longer exists.
-However, sometimes you might want to do this intentionally. If you _want_ to make sure that all
-Workflows currently being processed by, say, `2.0` stop (even if you don't yet have a new version
-ready) you can add a new version `2.1` to the sets marked as compatible with `2.0`. New tasks will
-target `2.1`, but since you haven't deployed any `2.1` Workers, they won't make any
-progress.
+However, sometimes you might want to do this intentionally.
+If you _want_ to make sure that all Workflows currently being processed by, say, `2.0` stop (even if you don't yet have a new version ready), you can add a new version `2.1` to the sets marked as compatible with `2.0`.
+New tasks will target `2.1`, but because you haven't deployed any `2.1` Workers, they won't make any progress.
#### Set constraints
-The sets have a maximum size limit, which defaults to 1000 build ids across all sets. This limit is
-configurable on Temporal Server via the `limit.versionBuildIDsPerQueue` dynamic config property.
-Operations to add new Build IDs to the sets will fail if the limit would be exceeded.
+The sets have a maximum size limit, which defaults to 100 build IDs across all sets.
+This limit is configurable on Temporal Server via the `limit.versionBuildIdLimitPerQueue` dynamic config property.
+Operations to add new Build IDs to the sets fail if the limit would be exceeded.
-There is also a limit on the number of sets, which defaults to 10. This limit is configurable via
-the `limit.versionCompatibleSetsPerQueue` dynamic config property.
+There is also a limit on the number of sets, which defaults to 10.
+This limit is configurable via the `limit.versionCompatibleSetLimitPerQueue` dynamic config property.
-In practice, these limits should rarely be a concern because a version is no longer needed after no
-open Workflows are using that version, and a background process will delete IDs and sets which are
-no longer needed.
+In practice, these limits should rarely be a concern, because a version is no longer needed once no open Workflows are using it, and a background process deletes IDs and sets that are no longer needed.
-There is also a limit on the size of each Build ID or version string, which defaults to 255
-characters. This limit is configurable on the server via the `limit.workerBuildIdSize` dynamic
-config property.
+There is also a limit on the size of each Build ID or version string, which defaults to 255 characters.
+This limit is configurable on the server via the `limit.workerBuildIdSize` dynamic config property.
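+
+For example, a minimal dynamic configuration sketch that raises the Build ID and compatible-set limits looks like the following (the override values are illustrative, not recommendations):
+
+```yaml
+limit.versionBuildIdLimitPerQueue:
+  - value: 200 # Default is 100 Build IDs across all sets of a Task Queue.
+    constraints: {}
+limit.versionCompatibleSetLimitPerQueue:
+  - value: 20 # Default is 10 compatible sets per Task Queue.
+    constraints: {}
+```
+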
### Build ID reachability
-Eventually, you'll want to know if you can retire the old Worker versions. Temporal provides functionality
-to help you determine if a version is still in by open or closed Workflows. You can use the
-`temporal` CLI to do this with the following command:
+Eventually, you'll want to know whether you can retire the old Worker versions.
+Temporal provides functionality to help you determine whether a version is still in use by open or closed Workflows.
+You can use the Temporal CLI to do this with the following command:
```command
temporal task-queue get-build-id-reachability
```
-The command tells if the Build ID in question is unreachable, only reachable by closed Workflows, or reachable by open and new Workflows on a per-Task-Queue basis.
-For example, this "2.0" Build ID is shown here by the CLI to be reachable by both new Workflows and
-some existing Workflows:
+The command determines, for each Task Queue, whether the Build ID in question is unreachable, reachable only by closed Workflows, or reachable by open and new Workflows.
+For example, this "2.0" Build ID is shown here by the CLI to be reachable by both new Workflows and some existing Workflows:
```command
temporal task-queue get-build-id-reachability --build-id "2.0"
@@ -537,4 +536,5 @@ You can also use this API `GetWorkerTaskReachability` directly from within langu
### Unversioned Workers
-Unversioned Workers refer to Workers that have not opted into the Worker Versioning feature in their configuration. They will only receive tasks from Task Queues which do not have any version sets defined on them, or which have open workflows that began executing before versions were added to the queue.
+Unversioned Workers refer to Workers that have not opted into the Worker Versioning feature in their configuration.
+They receive tasks only from Task Queues that do not have any version sets defined on them, or that have open Workflows that began executing before versions were added to the Task Queue.
diff --git a/docs/concepts/workflows.md b/docs/concepts/workflows.md
index 336f9632ec..3b0c4e6f3b 100644
--- a/docs/concepts/workflows.md
+++ b/docs/concepts/workflows.md
@@ -298,8 +298,8 @@ To protect the system, Temporal enforces a maximum of 50,000 pending Activities,
Currently, there is no limit on the total number of Signals that a Workflow Execution can receive.
These limits are set with the following [dynamic configuration keys](https://github.com/temporalio/temporal/blob/master/service/history/configs/config.go):
-- `limit.numPendingChildExecutions.error`
- `limit.numPendingActivities.error`
+- `limit.numPendingChildExecutions.error`
- `limit.numPendingSignals.error`
- `limit.numPendingCancelRequests.error`
- `history.maximumSignalsPerExecution`
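+
+For example, a minimal dynamic configuration sketch that overrides the pending-Activity limit looks like this (the override value is illustrative only):
+
+```yaml
+limit.numPendingActivities.error:
+  - value: 25000 # Default is 50,000 pending Activities per Workflow Execution.
+    constraints: {}
+```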
diff --git a/docs/dev-guide/golang/versioning.md b/docs/dev-guide/golang/versioning.md
index efe6cf34a8..10bd239c6c 100644
--- a/docs/dev-guide/golang/versioning.md
+++ b/docs/dev-guide/golang/versioning.md
@@ -173,17 +173,15 @@ Activity. But, we don't want to fail on that change, so we only check the functi
## Worker Versioning
-To make use of Worker Versioning in Go, you will need to
-do the following:
+To use Worker Versioning in Go, you need to do the following:
-1. Determine and assign a Build ID to your built Worker code, and opt in to versioning
-2. Tell the Task Queue your Worker is listening on about that Build ID, and whether its compatible with an existing Build ID
+1. Determine and assign a Build ID to your built Worker code, and opt in to versioning.
+2. Tell the Task Queue your Worker is listening on about that Build ID, and whether it's compatible with an existing Build ID.
### Assign a Build ID to your Worker
-Let's say you've chosen `deadbeef` as your Build ID, which might be a short git commit hash (a
-reasonable choice as Build ID). To assign it in your Worker code, you'd assign
-the following Worker Options:
+Let's say you've chosen `deadbeef` as your Build ID, which might be a short git commit hash (a reasonable choice as Build ID).
+To assign it in your Worker code, you assign the following Worker Options:
```go
// ...
@@ -196,14 +194,14 @@ w := worker.New(c, "your_task_queue_name", workerOptions)
// ...
```
-That's all you need to do in your Worker code. Importantly, if you start this Worker, it won't
-receive any tasks. That's because you need to tell the Task Queue about your Worker's Build ID
-first.
+That's all you need to do in your Worker code.
+Importantly, if you start this Worker, it won't receive any tasks.
+That's because you need to tell the Task Queue about your Worker's Build ID first.
### Tell the Task Queue about your Worker's Build ID
Now you can use the SDK (or the Temporal CLI) to tell the Task Queue about your Worker's Build ID.
-You might want to do this as part of your CI deployment process. Using the Go SDK:
+You might want to do this as part of your CI deployment process.
```go
// ...
@@ -215,11 +213,10 @@ err := client.UpdateWorkerBuildIdCompatibility(ctx, &client.UpdateWorkerBuildIdC
})
```
-This will add the `deadbeef` Build ID to the Task Queue as the sole version in a new version set
-which will be the default for the queue. New Workflows execute on Workers with this ID,
-and existing ones will continue to process by appropriately compatible Workers.
+This code adds the `deadbeef` Build ID to the Task Queue as the sole version in a new version set, which becomes the default for the queue.
+New Workflows execute on Workers with this Build ID, and existing ones continue to be processed by appropriately compatible Workers.
-If, instead, you wanted to add the Build ID to some existing compatible set, you can do this:
+If, instead, you want to add the Build ID to an existing compatible set, you can do this:
```go
// ...
@@ -232,8 +229,7 @@ err := client.UpdateWorkerBuildIdCompatibility(ctx, &client.UpdateWorkerBuildIdC
})
```
-This would add `deadbeef` to the existing compatible set containing `some-existing-build-id`, and
-would mark it as the new default ID for that set.
+This code adds `deadbeef` to the existing compatible set containing `some-existing-build-id` and marks it as the new default Build ID for that set.
You can also promote an existing Build ID in a set to be the default for that set:
@@ -247,8 +243,7 @@ err := client.UpdateWorkerBuildIdCompatibility(ctx, &client.UpdateWorkerBuildIdC
})
```
-As well as promote an entire set to become the default set for the queue (thus new Workflows will
-start using that set's default):
+You can also promote an entire set to become the default set for the queue, so that new Workflows start using that set's default:
```go
// ...
@@ -262,17 +257,13 @@ err := client.UpdateWorkerBuildIdCompatibility(ctx, &client.UpdateWorkerBuildIdC
### Specify versions for Commands
-By default, Activities, Child Workflows, and Continue-as-New use the same compatible version
-set as the Workflow that invoked them if they're also using the same Task Queue.
+By default, Activities, Child Workflows, and Continue-as-New use the same compatible version set as the Workflow that invoked them if they're also using the same Task Queue.
-If you want to override this behavior, you can specify your intent via the `VersioningIntent` field
-on the appropriate options struct.
+If you want to override this behavior, you can specify your intent via the `VersioningIntent` field on the appropriate options struct.
-
+
-For example, if you wanted to use the latest default version for an Activity, you would do this
-inside your Workflow code:
+For example, if you want to use the latest default version for an Activity, do the following inside your Workflow code:
```go
// ...
diff --git a/docs/dev-guide/javalang/versioning.md b/docs/dev-guide/javalang/versioning.md
index d96eb866af..1674a526f1 100644
--- a/docs/dev-guide/javalang/versioning.md
+++ b/docs/dev-guide/javalang/versioning.md
@@ -111,16 +111,15 @@ in none of them, then they have to share the Id.
## Worker Versioning
-To make use of Worker Versioning in Java, you will need to do the following:
+To use Worker Versioning in Java, you need to do the following:
-1. Determine and assign a Build ID to your built Worker code, and opt in to versioning
-2. Tell the Task Queue your Worker is listening on about that Build ID, and whether its compatible with an existing Build ID
+1. Determine and assign a Build ID to your built Worker code, and opt in to versioning.
+2. Tell the Task Queue your Worker is listening on about that Build ID, and whether it's compatible with an existing Build ID.
### Assign a Build ID to your Worker
-Let's say you've chosen `deadbeef` as your Build ID, which might be a short git commit hash (a
-reasonable choice as Build ID). To assign it in your Worker code, you'd assign
-the following Worker Options:
+Let's say you've chosen `deadbeef` as your Build ID, which might be a short git commit hash (a reasonable choice as Build ID).
+To assign it in your Worker code, assign the following Worker Options:
```java
// ...
@@ -133,14 +132,14 @@ Worker w = workerFactory.newWorker("your_task_queue_name", workerOptions);
// ...
```
-That's all you need to do in your Worker code. Importantly, if you start this Worker, it won't
-receive any tasks. That's because you need to tell the Task Queue about your Worker's Build ID
-first.
+That's all you need to do in your Worker code.
+Importantly, if you start this Worker, it won't receive any tasks.
+That's because you need to tell the Task Queue about your Worker's Build ID first.
### Tell the Task Queue about your Worker's Build ID
Now you can use the SDK (or the Temporal CLI) to tell the Task Queue about your Worker's Build ID.
-You might want to do this as part of your CI deployment process. Using the Go SDK:
+You might want to do this as part of your CI deployment process.
```java
// ...
@@ -148,11 +147,10 @@ workflowClient.updateWorkerBuildIdCompatability(
"your_task_queue_name", BuildIdOperation.newIdInNewDefaultSet("deadbeef"));
```
-This will add the `deadbeef` Build ID to the Task Queue as the sole version in a new version set
-which will be the default for the queue. New Workflows execute on Workers with this ID,
-and existing ones will continue to process by appropriately compatible Workers.
+This code adds the `deadbeef` Build ID to the Task Queue as the sole version in a new version set, which becomes the default for the queue.
+New Workflows execute on Workers with this Build ID, and existing ones continue to be processed by appropriately compatible Workers.
-If, instead, you wanted to add the Build ID to some existing compatible set, you can do this:
+If, instead, you want to add the Build ID to an existing compatible set, you can do this:
```java
// ...
@@ -160,8 +158,7 @@ workflowClient.updateWorkerBuildIdCompatability(
"your_task_queue_name", BuildIdOperation.newCompatibleVersion("deadbeef", "some-existing-build-id"));
```
-This would add `deadbeef` to the existing compatible set containing `some-existing-build-id`, and
-would mark it as the new default ID for that set.
+This code adds `deadbeef` to the existing compatible set containing `some-existing-build-id` and marks it as the new default Build ID for that set.
You can also promote an existing Build ID in a set to be the default for that set:
@@ -171,8 +168,7 @@ workflowClient.updateWorkerBuildIdCompatability(
"your_task_queue_name", BuildIdOperation.promoteBuildIdWithinSet("deadbeef"));
```
-As well as promote an entire set to become the default set for the queue (thus new Workflows will
-start using that set's default):
+You can also promote an entire set to become the default set for the queue, so that new Workflows start using that set's default:
```java
// ...
@@ -182,17 +178,13 @@ workflowClient.updateWorkerBuildIdCompatability(
### Specify versions for Commands
-By default, Activities, Child Workflows, and Continue-as-New use the same compatible version
-set as the Workflow that invoked them if they're also using the same Task Queue.
+By default, Activities, Child Workflows, and Continue-as-New use the same compatible version set as the Workflow that invoked them if they're also using the same Task Queue.
-If you want to override this behavior, you can specify your intent via the `setVersioningIntent`
-method on the `ActivityOptions`, `ChildWorkflowOptions`, or `ContinueAsNewOptions` objects.
+If you want to override this behavior, you can specify your intent via the `setVersioningIntent` method on the `ActivityOptions`, `ChildWorkflowOptions`, or `ContinueAsNewOptions` objects.
-
+
-For example, if you wanted to use the latest default version for an Activity, you could define your
-Activity Options like this:
+For example, if you want to use the latest default version for an Activity, you can define your Activity Options like this:
```java
// ...
diff --git a/docs/references/configuration.md b/docs/references/configuration.md
index 0971d8787e..5556755747 100644
--- a/docs/references/configuration.md
+++ b/docs/references/configuration.md
@@ -1,7 +1,7 @@
---
id: configuration
title: Temporal Cluster configuration reference
-sidebar_label: Cluster config
+sidebar_label: Cluster configuration
description: Much of the behavior of a Temporal Cluster is configured using the `development.yaml` file.
toc_max_heading_level: 4
tags:
diff --git a/docs/references/dynamic-configuration.md b/docs/references/dynamic-configuration.md
index 265f98d83a..6d75906bbb 100644
--- a/docs/references/dynamic-configuration.md
+++ b/docs/references/dynamic-configuration.md
@@ -1,13 +1,14 @@
---
id: dynamic-configuration
-title: Dynamic configuration reference
-description: Dynamic condifiguration key values can be set to override the default values in a Cluster configuration.
+title: Temporal Cluster dynamic configuration reference
sidebar_label: Dynamic configuration
+description: Temporal Cluster provides dynamic configuration keys that you can update and apply to a running Cluster without restarting your services.
+toc_max_heading_level: 4
tags:
- - reference
+- reference
---
-
+
Temporal Cluster provides dynamic configuration
What is Cluster configuration?
Cluster Configuration is the setup and configuration details of your Temporal Cluster, defined using YAML.
Learn more keys that you can update and apply to a running Cluster without restarting your services.
@@ -15,16 +16,16 @@ The dynamic configuration keys are set with default values when you create your
You can override these values as you test your Cluster setup for optimal performance according to your workload requirements.
For the complete list of dynamic configuration keys, see .
-Ensure that you check server releases notes for any changes to these keys and values.
+Ensure that you check server release notes for any changes to these keys and values.
-To check the default values set for a dynamic configuration key, check the following links:
+For the default values of dynamic configuration keys, check the following links:
- [Frontend Service](https://github.com/temporalio/temporal/blob/5783e781504d8ffac59f9848b830868f3139b980/service/frontend/service.go#L176)
- [History Service](https://github.com/temporalio/temporal/blob/5783e781504d8ffac59f9848b830868f3139b980/service/history/configs/config.go#L309)
- [Matching Service](https://github.com/temporalio/temporal/blob/5783e781504d8ffac59f9848b830868f3139b980/service/matching/config.go#L125)
- [Worker Service](https://github.com/temporalio/temporal/blob/5783e781504d8ffac59f9848b830868f3139b980/service/worker/service.go#L193)
-Note that setting dynamic configuration is optional.
+Setting dynamic configuration is optional.
Change these values only if you need to override the default values to achieve better performance on your Temporal Cluster.
Also, ensure that you test your changes before setting these in production.
@@ -63,67 +64,60 @@ testGetMapPropertyKey:
### Constraints
-Some dynamic configuration keys are set globally, and others can be customized on a Namespace or Task Queue level.
+You can define constraints on some dynamic configuration keys to set specific values that apply at the Namespace or Task Queue level.
+If you don't define constraints on a dynamic configuration key, its value applies across the entire Cluster.
-- To set global values for the configuration key with no constraints, use:
+- To set global values for the configuration key with no constraints, use the following:
```yaml
- keyName:
- - value: "my-value"
+ frontend.globalNamespaceRPS: # Total per-Namespace RPC rate limit applied across the Cluster.
+ - value: 5000
```
- For keys that can be customized at Namespace level, you can specify multiple values for different Namespaces in addition to one default value that applies globally to all Namespaces.
To set values at a Namespace level, use `namespace` (String) as shown in the following example.
```yaml
- keyName:
+ frontend.persistenceNamespaceMaxQPS: # Rate limit on the number of queries the Frontend sends to the Persistence store.
- constraints: {} # Sets default value that applies to all Namespaces
- value: "value-for-all-the-rest"
- - constraints: {namespace: "namespace1"} # Sets value for "namespace1" Namespace
- value: "value-for-namespace1"
+ value: 2000 # The default value for this key is 0.
+ - constraints: {namespace: "namespace1"} # Sets the limit on the number of queries that the "namespace1" Namespace can send to the Persistence store.
+ value: 4000
- constraints: {namespace: "namespace2"}
- value: "value-for-namespace2"
+ value: 1000
```
- For keys that can be customized at a Task Queue level, you can specify Task Queue name and Task type in addition to Namespace.
- To set values at a Task Queue level, use `taskqueueName` (String) with `taskType` (optional; supported values: `Workflow` and `Activity`), as shown in the following exmaple.
+ To set values at a Task Queue level, use `taskQueueName` (String) with `taskType` (optional; supported values: `Workflow` and `Activity`).
+
+ For example, if you have Workflow Executions creating a large number of Workflow and Activity tasks per second, you can add more partitions to your Task Queues (default is 4) to handle the high throughput of tasks.
+ To do this, add the following to your dynamic configuration file.
+ Note that when you change the number of partitions, you must set the same count for both read and write operations on Task Queues.
```yaml
- keyName:
- - constraints: {namespace: "namespace1", taskQueueName: "tq"} #Applies to both Workflow and Activity tasks on the "tq" Task Queue.
- value: 44
- - constraints: {namespace: "namespace1", taskQueueName: "other-tq", taskType: "Activity"} #Applies to Activity tasks on the "other_tq" Task Queue.
- value: 33
- - constraints: {namespace: "namespace2"} #Applies to all task queues in "namespace2".
- value: 22
+ matching.numTaskqueueReadPartitions: # Number of Task Queue partitions for read operations.
+ - constraints: {namespace: "namespace1", taskQueueName: "tq"} # Applies to the "tq" Task Queue for both Workflows and Activities.
+ value: 8 # The default value for this key is 4. Task Queues that need to support high traffic require a higher number of partitions. Set these values in accordance with your poller count.
+ - constraints: {namespace: "namespace1", taskQueueName: "other-tq", taskType: "Activity"} # Applies to the "other-tq" Task Queue for Activities specifically.
+ value: 20
+ - constraints: {namespace: "namespace2"} # Applies to all task queues in "namespace2".
+ value: 10
+ - constraints: {} # Applies to all other task queues in "namespace1" and all other Namespaces.
+ value: 16
+ matching.numTaskqueueWritePartitions: # Number of Task Queue partitions for write operations.
+ - constraints: {namespace: "namespace1", taskQueueName: "tq"} # Applies to the "tq" Task Queue for both Workflows and Activities.
+ value: 8 # The default value for this key is 4. Task Queues that need to support high traffic require a higher number of partitions. Set these values in accordance with your poller count.
+ - constraints: {namespace: "namespace1", taskQueueName: "other-tq", taskType: "Activity"} # Applies to the "other-tq" Task Queue for Activities specifically.
+ value: 20
+ - constraints: {namespace: "namespace2"} # Applies to all task queues in "namespace2".
+ value: 10
- constraints: {} # Applies to all other task queues in "namespace1" and all other Namespaces.
- value: 11
+ value: 16
```
-Note that these values are not applied first to last; values set on specific constraints override default values defined for that configuration key.
+
-For example to override the default maximum queries per second made to the Persistence database from the Frontend Service, add the following to your dynamic configuration file.
-
-```yaml
-#...
-frontend.persistenceMaxQPS:
- - value: 3000 # The default value for this key on the Frontend Service is 2000.
- constraints: {}
-#...
-```
-
-You can also set the maximum queries that can be made from a Namespace on the Frontend with the `frontend.PersistenceNamespaceMaxQPS` key.
-
-```yaml
-#...
-frontend.PersistenceNamespaceMaxQPS:
- - value: 3500 # The default value for this key on the Frontend Service is 2000.
- constraints:
- namespace: "your-namespace"
-#...
-```
-
-For examples on how dynamic configuration is set, see:
+For more examples on how dynamic configuration is set, see:
- [docker-compose](https://github.com/temporalio/docker-compose/tree/main/dynamicconfig)
- [samples-server](https://github.com/temporalio/samples-server/blob/main/tls/config/dynamicconfig/development.yaml)
@@ -132,85 +126,90 @@ For examples on how dynamic configuration is set, see:
The following table lists commonly used dynamic configuration keys that can be used for rate limiting requests to the Temporal Cluster.
-Note that the dynamic configuration key setting is optional. If you choose to update these values for your Temporal Cluster, ensure that you are provisioning enough resources to handle the load.
+Setting dynamic configuration keys is optional.
+If you choose to update these values for your Temporal Cluster, ensure that you are provisioning enough resources to handle the load.
-All values listed here are for Temporal server v1.20 .
+All values listed here are for Temporal server v1.20.
Check [server release notes](https://github.com/temporalio/temporal/releases) to verify any potential breaking changes when upgrading your versions.
### Service-level RPS limits
-The Rate Per Second (RPS) dynamic configuration keys set the rate at which requests can be made to each service in your Cluster.
+The Requests Per Second (RPS) dynamic configuration keys set the rate at which requests can be made to each service in your Cluster.
-When scaling your services, tune the RPS to test your workload and set acceptable provisioning benchmarks. Exceeding these limits will result in `ResourceExhaustedError`.
+When scaling your services, tune the RPS to test your workload and set acceptable provisioning benchmarks.
+Exceeding these limits results in `ResourceExhaustedError`.
-| Dynamic configuration key | Type | Description | Default value |
-| -------------------------------------- | ---- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
-| Frontend | | | |
-| `frontend.rps` | Int | Rate limit per second. This value applies to each Frontend service host. | 2400 |
-| `frontend.namespaceRPS` | Int | Rate limit per second applied for each Namespace. | 2400 |
-| `frontend.namespaceCount` | Int | Rate limit on concurrent Task Queue polls per Namespace per instance. | 1200 |
-| `frontend.globalNamespaceRPS` | Int | Namespace rate limit per second applied globally on the entire Cluster. The limit is evenly distributed among available Frontend service instances. If this is set, it overrides the per-instance limit (`frontend.namespaceRPS`). | 0 |
-| `internal-frontend.globalNamespaceRPS` | Int | Namespace rate limit per second across all internal-frontends. | 0 |
-| History | | | |
-| `history.rps` | Int | Request rate per second for each History host. | 3000 |
-| Matching | | | |
-| `matching.rps` | Int | Request rate per second for each Matching host | 1200 |
+| Dynamic configuration key | Type | Description | Default value |
+| -------------------------------------- | ---- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
+| Frontend | | | |
+| `frontend.rps` | Int | Rate limit (requests/second) for requests accepted by each Frontend Service host. | 2400 |
+| `frontend.namespaceRPS` | Int | Rate limit (requests/second) for requests accepted by each Namespace on the Frontend Service. | 2400 |
+| `frontend.namespaceCount` | Int | Limit on the number of concurrent Task Queue polls per Namespace per Frontend Service host. | 1200 |
+| `frontend.globalNamespaceRPS` | Int | Rate limit (requests/second) for requests accepted per Namespace, applied across Cluster. The limit is evenly distributed among available Frontend Service instances. If this is set, it overrides the per-instance limit (`frontend.namespaceRPS`). | 0 |
+| `internal-frontend.globalNamespaceRPS` | Int | Rate limit (requests/second) for requests accepted per Namespace, applied across all Internal-Frontend Service hosts in the Cluster. | 0 |
+| History | | | |
+| `history.rps` | Int | Rate limit (requests/second) for requests accepted by each History Service host. | 3000 |
+| Matching | | | |
+| `matching.rps` | Int | Rate limit (requests/second) for requests accepted by each Matching Service host. | 1200 |
+| `matching.numTaskqueueReadPartitions` | Int | Number of read partitions for a Task Queue. Must be set with `matching.numTaskqueueWritePartitions`. | 4 |
+| `matching.numTaskqueueWritePartitions` | Int | Number of write partitions for a Task Queue. | 4 |
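+
+For example, to raise the per-host request limits on the Frontend and History Services, you could add the following to your dynamic configuration file (the values are illustrative only; ensure that you provision enough resources to handle them):
+
+```yaml
+frontend.rps:
+  - value: 4800 # Default is 2400 requests per second per Frontend Service host.
+    constraints: {}
+history.rps:
+  - value: 6000 # Default is 3000 requests per second per History Service host.
+    constraints: {}
+```
+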
-### QPS limits for Persistence database
+### QPS limits for Persistence store
The Queries Per Second (QPS) dynamic configuration keys set the maximum number of queries a service can make per second to the Persistence store.
-Persistence rate limits are evaluated synchronously. Adjust these keys according to your database capacity and workload.
-If the number of queries made to the Persistence database is more than what the dynamic configuration value set, you will see latencies and timeouts on your tasks.
-
-| Dynamic configuration key | Type | Description | Default value |
-| ----------------------------------------- | ---- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
-| Frontend | | | |
-| `frontend.persistenceMaxQPS` | Int | Maximum queries per second that the Frontend service host can query the Persistence store database. | 2000 queries per second |
-| `frontend.persistenceNamespaceMaxQPS` | Int | Maximum queries per second that each Namespace on Frontend service host can query the Persistence store database. If the value set for this config is less or equal to 0, the value set for `PersistenceMaxQS` will apply. | 0 |
-| History | | | |
-| `history.persistenceMaxQPS` | Int | Maximum queries per second that the History host can query the Persistence store database. | 9000 |
-| `history.persistenceNamespaceMaxQPS` | Int | Maximum queries per second that each Namespace on History host can query the Persistence store database. If the value set for this config is less or equal to 0, the value set for `PersistenceMaxQS` will apply. | 0 |
-| Matching | | | |
-| `matching.persistenceMaxQPS` | Int | Maximum queries per second that the Matching service host can query the Persistence store database. | 9000 |
-| `matching.persistenceNamespaceMaxQPS` | Int | Maximum queries per second that the Matching host can query the Persistence store database for each Namespace. If the value set for this config is less or equal to 0, the value set for `PersistenceMaxQS` will apply. | 0 |
-| Worker | | | |
-| `worker.persistenceMaxQPS` | Int | Maximum queries per second that the Worker service host can query the Persistence store database. | 100 |
-| `worker.persistenceNamespaceMaxQPS` | Int | Maximum queries per second that the Worker host can query the Persistence store database for each Namespace. If the value set for this config is less or equal to 0, the value set for `PersistenceMaxQS` will apply. | 0 |
-| Visibility | | | |
-| `system.visibilityPersistenceMaxReadQPS` | Int | Maximum queries per second that Visibility database be queried for read operations. | 9000 |
-| `system.visibilityPersistenceMaxWriteQPS` | Int | Maximum queries per second that Visibility database be queried for write operations. | 9000 |
-
-#### Activity and Workflow default policy setting
+Persistence store rate limits are evaluated synchronously.
+Adjust these keys according to your database capacity and workload.
+If the number of queries made to the Persistence store exceeds the dynamic configuration value, you will see latencies and timeouts on your tasks.
+
+| Dynamic configuration key | Type | Description | Default value |
+| ----------------------------------------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
+| Frontend | | | |
+| `frontend.persistenceMaxQPS` | Int | Maximum number of queries per second that the Frontend Service host can send to the Persistence store. | 2000 |
+| `frontend.persistenceNamespaceMaxQPS` | Int | Maximum number of queries per second that each Namespace on the Frontend Service host can send to the Persistence store. If the value set for this config is less than or equal to 0, the value set for `frontend.persistenceMaxQPS` will apply. | 0 |
+| History | | | |
+| `history.persistenceMaxQPS` | Int | Maximum number of queries per second that the History host can send to the Persistence store. | 9000 |
+| `history.persistenceNamespaceMaxQPS` | Int | Maximum number of queries per second for each Namespace that the History host can send to the Persistence store. If the value set for this config is less than or equal to 0, the value set for `history.persistenceMaxQPS` will apply. | 0 |
+| Matching | | | |
+| `matching.persistenceMaxQPS` | Int | Maximum number of queries per second that the Matching Service host can send to the Persistence store. | 9000 |
+| `matching.persistenceNamespaceMaxQPS`      | Int  | Maximum number of queries per second that the Matching host can send to the Persistence store for each Namespace. If the value set for this config is less than or equal to 0, the value set for `matching.persistenceMaxQPS` will apply. | 0 |
+| Worker | | | |
+| `worker.persistenceMaxQPS` | Int | Maximum number of queries per second that the Worker Service host can send to the Persistence store. | 100 |
+| `worker.persistenceNamespaceMaxQPS`        | Int  | Maximum number of queries per second that the Worker host can send to the Persistence store for each Namespace. If the value set for this config is less than or equal to 0, the value set for `worker.persistenceMaxQPS` will apply. | 0 |
+| Visibility | | | |
+| `system.visibilityPersistenceMaxReadQPS`   | Int  | Maximum number of queries per second that the Visibility database can receive for read operations. | 9000 |
+| `system.visibilityPersistenceMaxWriteQPS`  | Int  | Maximum number of queries per second that the Visibility database can receive for write operations. | 9000 |
+
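+The following is a minimal sketch of overriding two of these keys, assuming the YAML dynamic configuration file format in which each key maps to a list of `value`/`constraints` entries; the numbers and the `my-namespace` Namespace name are illustrative, not recommendations.
+
+```yaml
+# dynamic_config.yaml (sketch): raise the Frontend Persistence rate limit
+# and give one Namespace its own budget.
+frontend.persistenceMaxQPS:
+  - value: 3000
+    constraints: {}
+frontend.persistenceNamespaceMaxQPS:
+  - value: 1000
+    constraints:
+      namespace: "my-namespace" # applies only to this Namespace
+```
+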
+### Activity and Workflow default policy setting
You can define default values for Activity and Workflow Retry Policies at the Cluster level with the following dynamic configuration keys.
A Retry Policy is a collection of attributes that instructs the Temporal Server how to retry a failure of a Workflow Execution or an Activity Task Execution.
A sketch of setting these defaults follows the table.
-Th, the default values will apply. Also, setting custom retry polici
-| Dynamic configuration key | Type | Description | Default value |
-| ------------------------------------ | ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
-| `history.defaultActivityRetryPolicy` | Map (key-value pair elements) | Out-of-the-box server configuration for an activity retry policy when it is not explicitly set for the Activity in your code. | [Default values for retry Policy](/retry-policies#default-values-for-retry-policy) |
-| `history.defaultWorkflowRetryPolicy` | Map (key-value pair elements) | Out-of-box Retry Policy for unset fields where the user has set an explicit `RetryPolicy`, but not specified all the fields. | [Default values for retry Policy](/retry-policies#default-values-for-retry-policy) |
+| Dynamic configuration key | Type | Description | Default value |
+| ------------------------------------ | ----------------------------- | ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- |
+| `history.defaultActivityRetryPolicy` | Map (key-value pair elements) | Server configuration for an Activity Retry Policy when it is not explicitly set for the Activity in your code.      | [Default values for Retry Policy](/retry-policies#default-values-for-retry-policy) |
+| `history.defaultWorkflowRetryPolicy` | Map (key-value pair elements) | Retry Policy for unset fields where the user has set an explicit `RetryPolicy`, but not specified all the fields.   | [Default values for Retry Policy](/retry-policies#default-values-for-retry-policy) |
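+
+The following is a minimal sketch of setting a Cluster-wide default Activity Retry Policy, assuming the YAML dynamic configuration file format; the map field names (`InitialIntervalInSeconds`, `MaximumIntervalCoefficient`, `BackoffCoefficient`, `MaximumAttempts`) and the values shown are illustrative assumptions, not confirmed defaults.
+
+```yaml
+# dynamic_config.yaml (sketch): Cluster-wide default Activity Retry Policy.
+# Field names are assumed for illustration; values are examples only.
+history.defaultActivityRetryPolicy:
+  - value:
+      InitialIntervalInSeconds: 2      # first retry after 2 seconds
+      MaximumIntervalCoefficient: 100  # cap the backoff at 100x the initial interval
+      BackoffCoefficient: 2            # double the interval after each attempt
+      MaximumAttempts: 0               # 0 means unlimited attempts
+    constraints: {}
+```
+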
### Size limit settings
-The Persistence database in the Cluster has default size limits set for optimal performance. The dynamic configuration keys relating to some of these are listed below.
+The Persistence store in the Cluster has default size limits set for optimal performance. The dynamic configuration keys for some of these limits are listed below; a sketch of overriding them follows the table.
-The default values on these keys have been set based on extensive testing. While these values can be changed, ensure that you are provisioning enough database resources to handle the changed values.
+The default values on these keys are based on extensive testing.
+You can change these values, but ensure that you are provisioning enough database resources to handle the changed values.
For details on platform limits, see the [Temporal Platform limits sheet](/kb/temporal-platform-limits-sheet).
-| Dynamic configuration key | Type | Description | Default value |
-| --------------------------------------- | ---- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------- |
-| `limit.maxIDLength` | Int | Length limit for various IDs, including: `Namespace`, `TaskQueue`, `WorkflowID`, `ActivityID`, `TimerID`, `WorkflowType`, `ActivityType`, `SignalName`, `MarkerName`, `ErrorReason`/`FailureReason`/`CancelCause`, `Identity`, `RequestID` | 1000 |
-| `system.transactionSizeLimit` | Int | Largest allowed size for each history event batch persisted in the Persistence store. | 4 MB (`4 * 1024 * 1024`) |
-| `limit.blobSize.warn` | Int | Limit, in MBs, for BLOBs size in an event when a warning is thrown in the server logs. | 512 KB (`512 * 1024`) |
-| `limit.blobSize.error` | Int | Limit, in MBs, for BLOBs size in an event when an error occurs in the transaction. | 2 MB (`2 * 1024 * 1024`) |
-| `limit.historySize.warn` | Int | Limit, in MBs, at which a warning is thrown for the Workflow Execution Event History size. | 10 MB (`50*1024*1024`) |
-| `limit.historySize.error` | Int | Limit, in MBs, at which an error occurs in the Workflow Execution for exceeding allowed size. | 50 MB (`50*1024*1024`) |
-| `limit.historyCount.warn` | Int | Limit, in count, at which a warning is thrown for the Workflow Execution Event History size. | 10,240 events (`10*1024`) |
-| `limit.historyCount.error` | Int | Limit, in count, at which an error occurs in the Workflow Execution for exceeding allowed number of Events. | 51200 events (`50*1024`) |
-| `limit.numPendingActivities.error` | Int | Maximum number of pending Activities that a Workflow Execution can have before the `ScheduleActivityTask` fails with an error. | 50000 |
-| `limit.numPendingSignals.error` | Int | Maximum number of pending Signals that a Workflow Execution can have before the `SignalExternalWorkflowExecution` commands from this Workflow fail with an error. | 50000 |
-| `history.maximumSignalsPerExecution` | Int | Maximum number of Signals that a Workflow Execution can receive before it throws an `Invalid Argument` error. | 0 |
-| `limit.numPendingCancelRequests.error` | Int | Maximum number of pending requests to cancel other Workflows that a Workflow Execution can have before the `RequestCancelExternalWorkflowExecution` commands fail with an error. | 50000 |
-| `limit.numPendingChildExecutions.error` | Int | Maximum number of pending Child Workflows that a Workflow Execution can have before the `StartChildWorkflowExecution` commands fail with an error. | 2000 |
+| Dynamic configuration key | Type | Description | Default value |
+| --------------------------------------- | ---- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------- |
+| `limit.maxIDLength`                      | Int  | Length limit for various IDs, including: `Namespace`, `TaskQueue`, `WorkflowID`, `ActivityID`, `TimerID`, `WorkflowType`, `ActivityType`, `SignalName`, `MarkerName`, `ErrorReason`/`FailureReason`/`CancelCause`, `Identity`, and `RequestID`.  | 1000 |
+| `limit.blobSize.warn`                    | Int  | Size limit for BLOBs in an Event; when exceeded, a warning is written to the server logs.                                                                       | 512 KB (512 × 1024) |
+| `limit.blobSize.error`                   | Int  | Size limit for BLOBs in an Event; when exceeded, the transaction fails with an error.                                                                           | 2 MB (2 × 1024 × 1024) |
+| `limit.historySize.warn`                 | Int  | Size limit for the Workflow Execution Event History at which a warning is thrown.                                                                               | 10 MB (10 × 1024 × 1024) |
+| `limit.historySize.error`                | Int  | Size limit for the Workflow Execution Event History at which an error occurs in the Workflow Execution for exceeding the allowed size.                          | 50 MB (50 × 1024 × 1024) |
+| `limit.historyCount.warn`                | Int  | Number of Events in the Workflow Execution Event History at which a warning is thrown.                                                                          | 10,240 Events (10 × 1024) |
+| `limit.historyCount.error`               | Int  | Number of Events in the Workflow Execution Event History at which an error occurs in the Workflow Execution for exceeding the allowed number of Events.         | 51,200 Events (50 × 1024) |
+| `limit.numPendingActivities.error` | Int | Maximum number of pending Activities that a Workflow Execution can have before the `ScheduleActivityTask` fails with an error. | 50000 |
+| `limit.numPendingSignals.error` | Int | Maximum number of pending Signals that a Workflow Execution can have before the `SignalExternalWorkflowExecution` commands from this Workflow fail with an error. | 50000 |
+| `history.maximumSignalsPerExecution` | Int | Maximum number of Signals that a Workflow Execution can receive before it throws an `Invalid Argument` error. | 0 |
+| `limit.numPendingCancelRequests.error` | Int | Maximum number of pending requests to cancel other Workflows that a Workflow Execution can have before the `RequestCancelExternalWorkflowExecution` commands fail with an error. | 50000 |
+| `limit.numPendingChildExecutions.error` | Int | Maximum number of pending Child Workflows that a Workflow Execution can have before the `StartChildWorkflowExecution` commands fail with an error. | 2000 |
+
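+The following is a minimal sketch of adjusting two of these thresholds, assuming the YAML dynamic configuration file format; the values are illustrative only, and raising any limit assumes you have provisioned the database resources to support it.
+
+```yaml
+# dynamic_config.yaml (sketch): adjust warning and error thresholds for
+# the number of Events in a Workflow Execution Event History.
+limit.historyCount.warn:
+  - value: 20480    # warn at 20 * 1024 Events
+    constraints: {}
+limit.historyCount.error:
+  - value: 102400   # error at 100 * 1024 Events
+    constraints: {}
+```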