release-20.2: sql: implement sequence caching and cached sequence serial normalization #64690

Closed
2 changes: 1 addition & 1 deletion docs/generated/settings/settings.html
@@ -64,7 +64,7 @@
<tr><td><code>sql.defaults.idle_in_session_timeout</code></td><td>duration</td><td><code>0s</code></td><td>default value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout</td></tr>
<tr><td><code>sql.defaults.idle_in_transaction_session_timeout</code></td><td>duration</td><td><code>0s</code></td><td>default value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout</td></tr>
<tr><td><code>sql.defaults.results_buffer.size</code></td><td>byte size</td><td><code>16 KiB</code></td><td>default size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.</td></tr>
<tr><td><code>sql.defaults.serial_normalization</code></td><td>enumeration</td><td><code>rowid</code></td><td>default handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2]</td></tr>
<tr><td><code>sql.defaults.serial_normalization</code></td><td>enumeration</td><td><code>rowid</code></td><td>default handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3]</td></tr>
<tr><td><code>sql.defaults.statement_timeout</code></td><td>duration</td><td><code>0s</code></td><td>default value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout</td></tr>
<tr><td><code>sql.distsql.max_running_flows</code></td><td>integer</td><td><code>500</code></td><td>maximum number of concurrent flows that can be run on a node</td></tr>
<tr><td><code>sql.log.slow_query.experimental_full_table_scans.enabled</code></td><td>boolean</td><td><code>false</code></td><td>when set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.</td></tr>
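For context on the new `sql_sequence_cached` value above: a minimal client-side sketch of how it might be exercised, assuming a local single-node cluster on the default port and the `lib/pq` driver. The table and column names are made up for illustration; the cluster setting and session variable names come from the settings row above.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stmts := []string{
		// Make cached SQL sequences the default backing for SERIAL columns.
		`SET CLUSTER SETTING sql.defaults.serial_normalization = 'sql_sequence_cached'`,
		// The same choice can be made per session.
		`SET serial_normalization = 'sql_sequence_cached'`,
		// SERIAL now expands to a real sequence whose values are cached per session.
		`CREATE TABLE accounts (id SERIAL PRIMARY KEY, balance INT)`,
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}
}
```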
53 changes: 53 additions & 0 deletions pkg/sql/alter_sequence.go
@@ -54,12 +54,65 @@ func (n *alterSequenceNode) startExec(params runParams) error {
	telemetry.Inc(sqltelemetry.SchemaChangeAlterCounter("sequence"))
	desc := n.seqDesc

	oldMinValue := desc.SequenceOpts.MinValue
	oldMaxValue := desc.SequenceOpts.MaxValue

	err := assignSequenceOptions(
		desc.SequenceOpts, n.n.Options, false /* setDefaults */, &params, desc.GetID(), desc.ParentID,
	)
	if err != nil {
		return err
	}
	opts := desc.SequenceOpts
	seqValueKey := params.p.ExecCfg().Codec.SequenceKey(uint32(desc.ID))

	getSequenceValue := func() (int64, error) {
		kv, err := params.p.txn.Get(params.ctx, seqValueKey)
		if err != nil {
			return 0, err
		}
		return kv.ValueInt(), nil
	}

	// Due to the semantics of sequence caching (see sql.planner.incrementSequenceUsingCache()),
	// it is possible for a sequence to have a value that exceeds its MinValue or MaxValue. Users
	// do not see values that exceed the sequence's bounds, and instead see "bounds exceeded" errors.
	// To make a sequence usable again after exceeding its bounds, there are two options:
	// 1. The user changes the sequence's value by calling setval(...).
	// 2. The user performs a schema change to alter the sequence's MinValue or MaxValue. In this
	//    case, the value of the sequence must be restored to the original MinValue or MaxValue
	//    transactionally. The code below handles the second case.

	// The sequence is decreasing and the minvalue is being decreased.
	if opts.Increment < 0 && desc.SequenceOpts.MinValue < oldMinValue {
		sequenceVal, err := getSequenceValue()
		if err != nil {
			return err
		}

		// If the sequence exceeded the old MinValue, it must be changed to start at the old MinValue.
		if sequenceVal < oldMinValue {
			err := params.p.txn.Put(params.ctx, seqValueKey, oldMinValue)
			if err != nil {
				return err
			}
		}
	} else if opts.Increment > 0 && desc.SequenceOpts.MaxValue > oldMaxValue {
		sequenceVal, err := getSequenceValue()
		if err != nil {
			return err
		}

		// Symmetrically, if the sequence exceeded the old MaxValue, it must be changed to start at the old MaxValue.
		if sequenceVal > oldMaxValue {
			err := params.p.txn.Put(params.ctx, seqValueKey, oldMaxValue)
			if err != nil {
				return err
			}
		}
	}

	if err := params.p.writeSchemaChange(
		params.ctx, n.seqDesc, descpb.InvalidMutationID, tree.AsStringWithFQNames(n.n, params.Ann()),
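The comment in this hunk describes two ways to make a sequence usable again once its stored value has overshot its bounds. Below is a minimal sketch of the second path (the one handled by this hunk), assuming a local cluster on the default port, the `lib/pq` driver, and a sequence whose cache is deliberately larger than its range so that the stored value can exceed MAXVALUE. The exact error text and the precise interaction between a session's already-cached values and the schema change are assumptions, not guarantees.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func mustExec(db *sql.DB, stmt string) {
	if _, err := db.Exec(stmt); err != nil {
		log.Fatal(err)
	}
}

func main() {
	db, err := sql.Open("postgres",
		"postgresql://root@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A small increasing sequence whose cache overshoots its range: the stored
	// value can end up above MAXVALUE even though callers only ever see
	// in-bounds values followed by "bounds exceeded" errors.
	mustExec(db, `CREATE SEQUENCE s MAXVALUE 3 CACHE 5`)

	for i := 0; i < 4; i++ {
		var v int64
		if err := db.QueryRow(`SELECT nextval('s')`).Scan(&v); err != nil {
			fmt.Println("nextval error:", err) // expected once the bound is hit
			continue
		}
		fmt.Println("nextval:", v)
	}

	// Option 2 from the comment above: raising MAXVALUE transactionally restores
	// the stored value to the old MAXVALUE, so the sequence is usable again and
	// continues from just past the old bound.
	mustExec(db, `ALTER SEQUENCE s MAXVALUE 10`)

	var v int64
	if err := db.QueryRow(`SELECT nextval('s')`).Scan(&v); err != nil {
		log.Fatal(err)
	}
	fmt.Println("nextval after ALTER SEQUENCE:", v)
}
```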
16 changes: 16 additions & 0 deletions pkg/sql/catalog/descpb/structured.go
@@ -306,6 +306,22 @@ func (opts *TableDescriptor_SequenceOpts) HasOwner() bool {
	return !opts.SequenceOwner.Equal(TableDescriptor_SequenceOpts_SequenceOwner{})
}

// EffectiveCacheSize returns the CacheSize field of a sequence option with
// the exception that it will return 1 if the CacheSize field is 0.
// A cache size of 1 indicates that there is no caching. The returned value
// will always be greater than or equal to 1.
//
// Prior to #51259, sequence caching was unimplemented and cache sizes were
// left uninitialized (i.e. with a value of 0). If a sequence has a cache
// size of 0, it should be treated in the same way as a sequence with a cache
// size of 1.
func (opts *TableDescriptor_SequenceOpts) EffectiveCacheSize() int64 {
	if opts.CacheSize == 0 {
		return 1
	}
	return opts.CacheSize
}

// SafeValue implements the redact.SafeValue interface.
func (ConstraintValidity) SafeValue() {}

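A self-contained sketch of how a caller might rely on EffectiveCacheSize. The types below are simplified stand-ins for the descpb structs, not the real package, and the "reserve cache size × increment values per KV write" policy is an assumption used only to illustrate why a legacy cache size of 0 must behave like 1.

```go
package main

import "fmt"

// SequenceOpts mirrors the relevant fields of TableDescriptor_SequenceOpts.
type SequenceOpts struct {
	Increment int64
	CacheSize int64
}

// EffectiveCacheSize mirrors the method added in this change: a zero
// (uninitialized, pre-#51259) cache size behaves like a cache size of 1.
func (o SequenceOpts) EffectiveCacheSize() int64 {
	if o.CacheSize == 0 {
		return 1
	}
	return o.CacheSize
}

func main() {
	legacy := SequenceOpts{Increment: 1, CacheSize: 0}   // descriptor written before caching existed
	cached := SequenceOpts{Increment: 1, CacheSize: 256} // sequence created with CACHE 256

	for _, opts := range []SequenceOpts{legacy, cached} {
		// A hypothetical caller bumps the persisted sequence value by this
		// amount once, then hands out the reserved values from memory.
		reserve := opts.EffectiveCacheSize() * opts.Increment
		fmt.Printf("cache size %d -> reserve %d value(s) per KV increment\n",
			opts.CacheSize, reserve)
	}
}
```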