diff --git a/pkg/ccl/changefeedccl/changefeed.go b/pkg/ccl/changefeedccl/changefeed.go index 1e1a57d7576b..02ce8d4f3f90 100644 --- a/pkg/ccl/changefeedccl/changefeed.go +++ b/pkg/ccl/changefeedccl/changefeed.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/bufalloc" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -71,7 +72,7 @@ func kvsToRows( ) func(context.Context) ([]emitEntry, error) { rfCache := newRowFetcherCache(leaseMgr) - var kvs sqlbase.SpanKVFetcher + var kvs row.SpanKVFetcher appendEmitEntryForKV := func( ctx context.Context, output []emitEntry, kv roachpb.KeyValue, schemaTimestamp hlc.Timestamp, ) ([]emitEntry, error) { diff --git a/pkg/ccl/changefeedccl/rowfetcher_cache.go b/pkg/ccl/changefeedccl/rowfetcher_cache.go index eb1c235a2e55..110adb742378 100644 --- a/pkg/ccl/changefeedccl/rowfetcher_cache.go +++ b/pkg/ccl/changefeedccl/rowfetcher_cache.go @@ -13,6 +13,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -21,12 +22,12 @@ import ( // rowFetcherCache maintains a cache of single table RowFetchers. Given a key // with an mvcc timestamp, it retrieves the correct TableDescriptor for that key -// and returns a RowFetcher initialized with that table. This RowFetcher's +// and returns a Fetcher initialized with that table. This Fetcher's // StartScanFrom can be used to turn that key (or all the keys making up the // column families of one row) into a row. 
type rowFetcherCache struct { leaseMgr *sql.LeaseManager - fetchers map[*sqlbase.TableDescriptor]*sqlbase.RowFetcher + fetchers map[*sqlbase.TableDescriptor]*row.Fetcher a sqlbase.DatumAlloc } @@ -34,7 +35,7 @@ type rowFetcherCache struct { func newRowFetcherCache(leaseMgr *sql.LeaseManager) *rowFetcherCache { return &rowFetcherCache{ leaseMgr: leaseMgr, - fetchers: make(map[*sqlbase.TableDescriptor]*sqlbase.RowFetcher), + fetchers: make(map[*sqlbase.TableDescriptor]*row.Fetcher), } } @@ -80,7 +81,7 @@ func (c *rowFetcherCache) TableDescForKey( func (c *rowFetcherCache) RowFetcherForTableDesc( tableDesc *sqlbase.TableDescriptor, -) (*sqlbase.RowFetcher, error) { +) (*row.Fetcher, error) { if rf, ok := c.fetchers[tableDesc]; ok { return rf, nil } @@ -93,10 +94,10 @@ func (c *rowFetcherCache) RowFetcherForTableDesc( valNeededForCol.Add(colIdx) } - var rf sqlbase.RowFetcher + var rf row.Fetcher if err := rf.Init( false /* reverse */, false /* returnRangeInfo */, false /* isCheck */, &c.a, - sqlbase.RowFetcherTableArgs{ + row.FetcherTableArgs{ Spans: tableDesc.AllIndexSpans(), Desc: tableDesc, Index: &tableDesc.PrimaryIndex, diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index 893c2c5ce10a..c07585431af9 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -35,6 +35,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -363,10 +364,8 @@ func (r fkResolver) LookupSchema( } // Implements the sql.SchemaResolver interface. 
-func (r fkResolver) LookupTableByID( - ctx context.Context, id sqlbase.ID, -) (sqlbase.TableLookup, error) { - return sqlbase.TableLookup{}, errSchemaResolver +func (r fkResolver) LookupTableByID(ctx context.Context, id sqlbase.ID) (row.TableLookup, error) { + return row.TableLookup{}, errSchemaResolver } const csvDatabaseName = "csv" diff --git a/pkg/ccl/importccl/load.go b/pkg/ccl/importccl/load.go index 503e842a0b0c..b3c75a02c26e 100644 --- a/pkg/ccl/importccl/load.go +++ b/pkg/ccl/importccl/load.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -97,7 +98,7 @@ func Load( var currentCmd bytes.Buffer scanner := bufio.NewReader(r) - var ri sqlbase.RowInserter + var ri row.Inserter var defaultExprs []tree.TypedExpr var cols []sqlbase.ColumnDescriptor var tableDesc *sqlbase.TableDescriptor @@ -181,7 +182,7 @@ func Load( } } - ri, err = sqlbase.MakeRowInserter(nil, tableDesc, nil, tableDesc.Columns, + ri, err = row.MakeInserter(nil, tableDesc, nil, tableDesc.Columns, true, &sqlbase.DatumAlloc{}) if err != nil { return backupccl.BackupDescriptor{}, errors.Wrap(err, "make row inserter") @@ -256,7 +257,7 @@ func insertStmtToKVs( defaultExprs []tree.TypedExpr, cols []sqlbase.ColumnDescriptor, evalCtx tree.EvalContext, - ri sqlbase.RowInserter, + ri row.Inserter, stmt *tree.Insert, f func(roachpb.KeyValue), ) error { @@ -293,10 +294,10 @@ func insertStmtToKVs( Cols: tableDesc.Columns, } for _, tuple := range values.Rows { - row := make([]tree.Datum, len(tuple)) + insertRow := make([]tree.Datum, len(tuple)) for i, expr := range tuple { if expr == tree.DNull { - row[i] = tree.DNull + insertRow[i] = tree.DNull continue } c, ok := 
expr.(tree.Constant) @@ -304,7 +305,7 @@ func insertStmtToKVs( return errors.Errorf("unsupported expr: %q", expr) } var err error - row[i], err = c.ResolveAsType(nil, tableDesc.Columns[i].Type.ToDatumType()) + insertRow[i], err = c.ResolveAsType(nil, tableDesc.Columns[i].Type.ToDatumType()) if err != nil { return err } @@ -314,16 +315,16 @@ func insertStmtToKVs( var computeExprs []tree.TypedExpr var computedCols []sqlbase.ColumnDescriptor - row, err := sql.GenerateInsertRow( - defaultExprs, computeExprs, cols, computedCols, evalCtx, tableDesc, row, &computedIVarContainer, + insertRow, err := sql.GenerateInsertRow( + defaultExprs, computeExprs, cols, computedCols, evalCtx, tableDesc, insertRow, &computedIVarContainer, ) if err != nil { - return errors.Wrapf(err, "process insert %q", row) + return errors.Wrapf(err, "process insert %q", insertRow) } // TODO(bram): Is the checking of FKs here required? If not, turning them // off may provide a speed boost. - if err := ri.InsertRow(ctx, b, row, true, sqlbase.CheckFKs, false /* traceKV */); err != nil { - return errors.Wrapf(err, "insert %q", row) + if err := ri.InsertRow(ctx, b, insertRow, true, row.CheckFKs, false /* traceKV */); err != nil { + return errors.Wrapf(err, "insert %q", insertRow) } } return nil diff --git a/pkg/ccl/importccl/read_import_proc.go b/pkg/ccl/importccl/read_import_proc.go index df03ff80735a..2cf4a40806a1 100644 --- a/pkg/ccl/importccl/read_import_proc.go +++ b/pkg/ccl/importccl/read_import_proc.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/distsqlrun" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" @@ -200,7 +201,7 @@ type rowConverter struct { // The rest of these are derived from tableDesc, just cached here. 
hidden int - ri sqlbase.RowInserter + ri row.Inserter evalCtx *tree.EvalContext cols []sqlbase.ColumnDescriptor visibleCols []sqlbase.ColumnDescriptor @@ -220,7 +221,7 @@ func newRowConverter( evalCtx: evalCtx, } - ri, err := sqlbase.MakeRowInserter(nil /* txn */, tableDesc, nil, /* fkTables */ + ri, err := row.MakeInserter(nil /* txn */, tableDesc, nil, /* fkTables */ tableDesc.Columns, false /* checkFKs */, &sqlbase.DatumAlloc{}) if err != nil { return nil, errors.Wrap(err, "make row inserter") @@ -293,7 +294,7 @@ func (c *rowConverter) row(ctx context.Context, fileIndex int32, rowIndex int64) var computeExprs []tree.TypedExpr var computedCols []sqlbase.ColumnDescriptor - row, err := sql.GenerateInsertRow( + insertRow, err := sql.GenerateInsertRow( c.defaultExprs, computeExprs, c.cols, computedCols, *c.evalCtx, c.tableDesc, c.datums, &c.computedIVarContainer) if err != nil { return errors.Wrapf(err, "generate insert row") @@ -304,9 +305,9 @@ func (c *rowConverter) row(ctx context.Context, fileIndex int32, rowIndex int64) kv.Value.InitChecksum(kv.Key) c.kvBatch = append(c.kvBatch, kv) }), - row, + insertRow, true, /* ignoreConflicts */ - sqlbase.SkipFKs, + row.SkipFKs, false, /* traceKV */ ); err != nil { return errors.Wrapf(err, "insert row") diff --git a/pkg/server/settingsworker.go b/pkg/server/settingsworker.go index 0bf5f2e78e79..13486922ca35 100644 --- a/pkg/server/settingsworker.go +++ b/pkg/server/settingsworker.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -34,7 +35,7 @@ func (s *Server) refreshSettings() { a := &sqlbase.DatumAlloc{} settingsTablePrefix := keys.MakeTablePrefix(uint32(tbl.ID)) - colIdxMap := 
sqlbase.ColIDtoRowIndexFromCols(tbl.Columns) + colIdxMap := row.ColIDtoRowIndexFromCols(tbl.Columns) processKV := func(ctx context.Context, kv roachpb.KeyValue, u settings.Updater) error { if !bytes.HasPrefix(kv.Key, settingsTablePrefix) { diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 8d2ef303dcb7..7ff41df96ecb 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/distsqlrun" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -216,7 +217,7 @@ func (sc *SchemaChanger) truncateIndexes( alloc := &sqlbase.DatumAlloc{} for _, desc := range dropped { var resume roachpb.Span - for row, done := int64(0), false; !done; row += chunkSize { + for rowIdx, done := int64(0), false; !done; rowIdx += chunkSize { // First extend the schema change lease. if err := sc.ExtendLease(ctx, lease); err != nil { return err @@ -225,7 +226,7 @@ func (sc *SchemaChanger) truncateIndexes( resumeAt := resume if log.V(2) { log.Infof(ctx, "drop index (%d, %d) at row: %d, span: %s", - sc.tableID, sc.mutationID, row, resume) + sc.tableID, sc.mutationID, rowIdx, resume) } if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { if fn := sc.execCfg.DistSQLRunTestingKnobs.RunBeforeBackfillChunk; fn != nil { @@ -244,8 +245,8 @@ func (sc *SchemaChanger) truncateIndexes( return err } - rd, err := sqlbase.MakeRowDeleter( - txn, tableDesc, nil, nil, sqlbase.SkipFKs, nil /* *tree.EvalContext */, alloc, + rd, err := row.MakeDeleter( + txn, tableDesc, nil, nil, row.SkipFKs, nil /* *tree.EvalContext */, alloc, ) if err != nil { return err @@ -400,12 +401,12 @@ func (sc *SchemaChanger) distBackfill( // backfiller processor. 
var otherTableDescs []sqlbase.TableDescriptor if backfillType == columnBackfill { - fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( ctx, *tableDesc, - sqlbase.CheckUpdates, - sqlbase.NoLookup, - sqlbase.NoCheckPrivilege, + row.CheckUpdates, + row.NoLookup, + row.NoCheckPrivilege, nil, /* AnalyzeExprFunction */ ) if err != nil { @@ -592,12 +593,12 @@ func columnBackfillInTxn( // otherTableDescs contains any other table descriptors required by the // backfiller processor. var otherTableDescs []sqlbase.TableDescriptor - fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( ctx, *tableDesc, - sqlbase.CheckUpdates, - sqlbase.NoLookup, - sqlbase.NoCheckPrivilege, + row.CheckUpdates, + row.NoLookup, + row.NoCheckPrivilege, nil, /* AnalyzeExprFunction */ ) if err != nil { @@ -656,8 +657,8 @@ func indexTruncateInTxn( idx := tableDesc.Mutations[0].GetIndex() var sp roachpb.Span for done := false; !done; done = sp.Key == nil { - rd, err := sqlbase.MakeRowDeleter( - txn, tableDesc, nil, nil, sqlbase.SkipFKs, nil /* *tree.EvalContext */, alloc, + rd, err := row.MakeDeleter( + txn, tableDesc, nil, nil, row.SkipFKs, nil /* *tree.EvalContext */, alloc, ) if err != nil { return err diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index e652dbc2a00e..96461b8fc4f1 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -47,7 +48,7 @@ func IndexMutationFilter(m sqlbase.DescriptorMutation) bool { // backfiller is common to a ColumnBackfiller or an IndexBackfiller. 
type backfiller struct { - fetcher sqlbase.RowFetcher + fetcher row.Fetcher alloc sqlbase.DatumAlloc } @@ -113,7 +114,7 @@ func (cb *ColumnBackfiller) Init(evalCtx *tree.EvalContext, desc sqlbase.TableDe var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, len(desc.Columns)-1) - tableArgs := sqlbase.RowFetcherTableArgs{ + tableArgs := row.FetcherTableArgs{ Desc: &desc, Index: &desc.PrimaryIndex, ColIdxMap: desc.ColumnIdxMap(), @@ -137,12 +138,12 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( alsoCommit bool, traceKV bool, ) (roachpb.Key, error) { - fkTables, _ := sqlbase.TablesNeededForFKs( + fkTables, _ := row.TablesNeededForFKs( ctx, tableDesc, - sqlbase.CheckUpdates, - sqlbase.NoLookup, - sqlbase.NoCheckPrivilege, + row.CheckUpdates, + row.NoLookup, + row.NoCheckPrivilege, nil, /* AnalyzeExprFunction */ ) for i, fkTableDesc := range otherTables { @@ -166,13 +167,13 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( requestedCols := make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(cb.added)) requestedCols = append(requestedCols, tableDesc.Columns...) requestedCols = append(requestedCols, cb.added...) - ru, err := sqlbase.MakeRowUpdater( + ru, err := row.MakeUpdater( txn, &tableDesc, fkTables, cb.updateCols, requestedCols, - sqlbase.RowUpdaterOnlyColumns, + row.UpdaterOnlyColumns, cb.evalCtx, &cb.alloc, ) @@ -243,7 +244,7 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( } } if _, err := ru.UpdateRow( - ctx, b, oldValues, updateValues, sqlbase.CheckFKs, traceKV, + ctx, b, oldValues, updateValues, row.CheckFKs, traceKV, ); err != nil { return roachpb.Key{}, err } @@ -279,7 +280,7 @@ func ConvertBackfillError( return errors.Wrap(err, "backfill error") } } - return sqlbase.ConvertBatchError(ctx, desc, b) + return row.ConvertBatchError(ctx, desc, b) } // IndexBackfiller is capable of backfilling all the added index. 
@@ -335,7 +336,7 @@ func (ib *IndexBackfiller) Init(desc sqlbase.TableDescriptor) error { ib.colIdxMap[c.ID] = i } - tableArgs := sqlbase.RowFetcherTableArgs{ + tableArgs := row.FetcherTableArgs{ Desc: &desc, Index: &desc.PrimaryIndex, ColIdxMap: ib.colIdxMap, diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index ddfafca679fd..64a08fe50d96 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -214,8 +215,8 @@ func (n *createTableNode) startExec(params runParams) error { // Instantiate a row inserter and table writer. It has a 1-1 // mapping to the definitions in the descriptor. - ri, err := sqlbase.MakeRowInserter( - params.p.txn, &desc, nil, desc.Columns, sqlbase.SkipFKs, ¶ms.p.alloc) + ri, err := row.MakeInserter( + params.p.txn, &desc, nil, desc.Columns, row.SkipFKs, ¶ms.p.alloc) if err != nil { return err } diff --git a/pkg/sql/delete.go b/pkg/sql/delete.go index 42815ca7c38f..6ba4722a2db5 100644 --- a/pkg/sql/delete.go +++ b/pkg/sql/delete.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -97,10 +98,10 @@ func (p *planner) Delete( } // Determine what are the foreign key tables that are involved in the deletion. 
- fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( ctx, *desc, - sqlbase.CheckDeletes, + row.CheckDeletes, p.LookupTableByID, p.CheckPrivilege, p.analyzeExpr, @@ -128,8 +129,8 @@ func (p *planner) Delete( } // Create the table deleter, which does the bulk of the work. - rd, err := sqlbase.MakeRowDeleter( - p.txn, desc, fkTables, requestedCols, sqlbase.CheckFKs, p.EvalContext(), &p.alloc, + rd, err := row.MakeDeleter( + p.txn, desc, fkTables, requestedCols, row.CheckFKs, p.EvalContext(), &p.alloc, ) if err != nil { return nil, err @@ -355,7 +356,7 @@ func (d *deleteNode) FastPathResults() (int, bool) { return d.run.rowCount, d.run.fastPath } -func canDeleteFastInterleaved(table TableDescriptor, fkTables sqlbase.TableLookupsByID) bool { +func canDeleteFastInterleaved(table TableDescriptor, fkTables row.TableLookupsByID) bool { // If there are no interleaved tables then don't take the fast path. // This avoids superfluous use of DelRange in cases where there isn't as much of a performance boost. 
hasInterleaved := false diff --git a/pkg/sql/delete_test.go b/pkg/sql/delete_test.go index b002792310c1..161d2facec41 100644 --- a/pkg/sql/delete_test.go +++ b/pkg/sql/delete_test.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -119,20 +120,20 @@ CREATE TABLE IF NOT EXISTS child_with_index( } } - lookup := func(ctx context.Context, tableID sqlbase.ID) (sqlbase.TableLookup, error) { + lookup := func(ctx context.Context, tableID sqlbase.ID) (row.TableLookup, error) { table, exists := tablesByID[tableID] if !exists { - return sqlbase.TableLookup{}, errors.Errorf("Could not lookup table:%d", tableID) + return row.TableLookup{}, errors.Errorf("Could not lookup table:%d", tableID) } - return sqlbase.TableLookup{Table: table}, nil + return row.TableLookup{Table: table}, nil } - fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( context.TODO(), *pd, - sqlbase.CheckDeletes, + row.CheckDeletes, lookup, - sqlbase.NoCheckPrivilege, + row.NoCheckPrivilege, nil, /* AnalyzeExprFunction */ ) if err != nil { diff --git a/pkg/sql/distsqlrun/indexjoiner.go b/pkg/sql/distsqlrun/indexjoiner.go index c4f88d693d73..8fa8a429a170 100644 --- a/pkg/sql/distsqlrun/indexjoiner.go +++ b/pkg/sql/distsqlrun/indexjoiner.go @@ -18,6 +18,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/tracing" @@ -40,7 +41,7 @@ type indexJoiner struct { // to get rows from the fetcher. This enables the indexJoiner to wrap the // fetcherInput with a stat collector when necessary. 
fetcherInput RowSource - fetcher sqlbase.RowFetcher + fetcher row.Fetcher // fetcherReady indicates that we have started an index scan and there are // potentially more rows to retrieve. fetcherReady bool @@ -112,7 +113,7 @@ func newIndexJoiner( ); err != nil { return nil, err } - ij.fetcherInput = &rowFetcherWrapper{RowFetcher: &ij.fetcher} + ij.fetcherInput = &rowFetcherWrapper{Fetcher: &ij.fetcher} if sp := opentracing.SpanFromContext(flowCtx.EvalCtx.Ctx()); sp != nil && tracing.IsRecording(sp) { // Enable stats collection. diff --git a/pkg/sql/distsqlrun/interleaved_reader_joiner.go b/pkg/sql/distsqlrun/interleaved_reader_joiner.go index f97a59d5c17a..cb4a52722370 100644 --- a/pkg/sql/distsqlrun/interleaved_reader_joiner.go +++ b/pkg/sql/distsqlrun/interleaved_reader_joiner.go @@ -20,6 +20,7 @@ import ( "github.com/pkg/errors" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -63,7 +64,7 @@ type interleavedReaderJoiner struct { allSpans roachpb.Spans limitHint int64 - fetcher sqlbase.RowFetcher + fetcher row.Fetcher alloc sqlbase.DatumAlloc // TODO(richardwu): If we need to buffer more than 1 ancestor row for @@ -296,7 +297,7 @@ func newInterleavedReaderJoiner( tables := make([]tableInfo, len(spec.Tables)) // We need to take spans from all tables and merge them together - // for RowFetcher. + // for Fetcher. allSpans := make(roachpb.Spans, 0, len(spec.Tables)) // We need to figure out which table is the ancestor. 
@@ -389,7 +390,7 @@ func newInterleavedReaderJoiner( func (irj *interleavedReaderJoiner) initRowFetcher( tables []InterleavedReaderJoinerSpec_Table, reverseScan bool, alloc *sqlbase.DatumAlloc, ) error { - args := make([]sqlbase.RowFetcherTableArgs, len(tables)) + args := make([]row.FetcherTableArgs, len(tables)) for i, table := range tables { desc := table.Desc diff --git a/pkg/sql/distsqlrun/joinreader.go b/pkg/sql/distsqlrun/joinreader.go index 46a86f4363a7..3b4cb60a57fa 100644 --- a/pkg/sql/distsqlrun/joinreader.go +++ b/pkg/sql/distsqlrun/joinreader.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" @@ -71,7 +72,7 @@ type joinReader struct { // to get rows from the fetcher. This enables the joinReader to wrap the // fetcherInput with a stat collector when necessary. fetcherInput RowSource - fetcher sqlbase.RowFetcher + fetcher row.Fetcher indexKeyPrefix []byte alloc sqlbase.DatumAlloc rowAlloc sqlbase.EncDatumRowAlloc @@ -94,7 +95,7 @@ type joinReader struct { // primaryFetcherInput wraps primaryFetcher in a RowSource implementation for // the same reason that fetcher is wrapped. primaryFetcherInput RowSource - primaryFetcher *sqlbase.RowFetcher + primaryFetcher *row.Fetcher primaryColumnTypes []sqlbase.ColumnType primaryKeyPrefix []byte @@ -208,7 +209,7 @@ func newJoinReader( // secondary index, then do a second lookup on the primary index to get the // needed output columns. 
neededIndexColumns = getIndexColSet(&jr.desc.PrimaryIndex, jr.colIdxMap) - jr.primaryFetcher = &sqlbase.RowFetcher{} + jr.primaryFetcher = &row.Fetcher{} _, _, err = initRowFetcher( jr.primaryFetcher, &jr.desc, 0 /* indexIdx */, jr.colIdxMap, false, /* reverse */ jr.neededRightCols(), false /* isCheck */, &jr.alloc, @@ -223,7 +224,7 @@ func newJoinReader( } jr.primaryKeyPrefix = sqlbase.MakeIndexKeyPrefix(&jr.desc, jr.desc.PrimaryIndex.ID) - jr.primaryFetcherInput = &rowFetcherWrapper{RowFetcher: jr.primaryFetcher} + jr.primaryFetcherInput = &rowFetcherWrapper{Fetcher: jr.primaryFetcher} if collectingStats { jr.primaryFetcherInput = NewInputStatCollector(jr.primaryFetcherInput) } @@ -236,7 +237,7 @@ func newJoinReader( if err != nil { return nil, err } - jr.fetcherInput = &rowFetcherWrapper{RowFetcher: &jr.fetcher} + jr.fetcherInput = &rowFetcherWrapper{Fetcher: &jr.fetcher} if collectingStats { jr.input = NewInputStatCollector(jr.input) jr.fetcherInput = NewInputStatCollector(jr.fetcherInput) diff --git a/pkg/sql/distsqlrun/scrub_tablereader.go b/pkg/sql/distsqlrun/scrub_tablereader.go index a6d22b49ba13..e084ca30ee77 100644 --- a/pkg/sql/distsqlrun/scrub_tablereader.go +++ b/pkg/sql/distsqlrun/scrub_tablereader.go @@ -46,7 +46,7 @@ var ScrubTypes = []sqlbase.ColumnType{ type scrubTableReader struct { tableReader tableDesc sqlbase.TableDescriptor - // fetcherResultToColIdx maps RowFetcher results to the column index in + // fetcherResultToColIdx maps Fetcher results to the column index in // the TableDescriptor. This is only initialized and used during scrub // physical checks. fetcherResultToColIdx []int @@ -92,7 +92,7 @@ func newScrubTableReader( nil, /* memMonitor */ ProcStateOpts{ // We don't pass tr.input as an inputToDrain; tr.input is just an adapter - // on top of a RowFetcher; draining doesn't apply to it. Moreover, Andrei + // on top of a Fetcher; draining doesn't apply to it. 
Moreover, Andrei // doesn't trust that the adapter will do the right thing on a Next() call // after it had previously returned an error. InputsToDrain: nil, diff --git a/pkg/sql/distsqlrun/tablereader.go b/pkg/sql/distsqlrun/tablereader.go index 7d0ddf757fb1..74ed9cd3673f 100644 --- a/pkg/sql/distsqlrun/tablereader.go +++ b/pkg/sql/distsqlrun/tablereader.go @@ -16,13 +16,13 @@ package distsqlrun import ( "context" + "sync" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" - "sync" - "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -43,9 +43,9 @@ type tableReader struct { // input is really the fetcher below, possibly wrapped in a stats generator. input RowSource - // fetcher is the underlying RowFetcher, should only be used for + // fetcher is the underlying Fetcher, should only be used for // initialization, call input.Next() to retrieve rows once initialized. - fetcher sqlbase.RowFetcher + fetcher row.Fetcher alloc sqlbase.DatumAlloc } @@ -97,7 +97,7 @@ func newTableReader( nil, /* memMonitor */ ProcStateOpts{ // We don't pass tr.input as an inputToDrain; tr.input is just an adapter - // on top of a RowFetcher; draining doesn't apply to it. Moreover, Andrei + // on top of a Fetcher; draining doesn't apply to it. Moreover, Andrei // doesn't trust that the adapter will do the right thing on a Next() call // after it had previously returned an error. 
InputsToDrain: nil, @@ -126,7 +126,7 @@ func newTableReader( for i, s := range spec.Spans { tr.spans[i] = s.Span } - tr.input = &rowFetcherWrapper{RowFetcher: &tr.fetcher} + tr.input = &rowFetcherWrapper{Fetcher: &tr.fetcher} if sp := opentracing.SpanFromContext(flowCtx.EvalCtx.Ctx()); sp != nil && tracing.IsRecording(sp) { tr.input = NewInputStatCollector(tr.input) @@ -137,10 +137,10 @@ func newTableReader( } // rowFetcherWrapper is used only by a tableReader to wrap calls to -// RowFetcher.NextRow() in a RowSource implementation. +// Fetcher.NextRow() in a RowSource implementation. type rowFetcherWrapper struct { ctx context.Context - *sqlbase.RowFetcher + *row.Fetcher } var _ RowSource = &rowFetcherWrapper{} @@ -151,7 +151,7 @@ func (w *rowFetcherWrapper) Start(ctx context.Context) context.Context { return ctx } -// Next() calls NextRow() on the underlying RowFetcher. If the returned +// Next() calls NextRow() on the underlying Fetcher. If the returned // ProducerMetadata is not nil, only its Err field will be set. func (w *rowFetcherWrapper) Next() (sqlbase.EncDatumRow, *ProducerMetadata) { row, _, _, err := w.NextRow(w.ctx) @@ -165,7 +165,7 @@ func (w rowFetcherWrapper) ConsumerDone() {} func (w rowFetcherWrapper) ConsumerClosed() {} func initRowFetcher( - fetcher *sqlbase.RowFetcher, + fetcher *row.Fetcher, desc *sqlbase.TableDescriptor, indexIdx int, colIdxMap map[sqlbase.ColumnID]int, @@ -196,7 +196,7 @@ func initRowFetcher( } } } - tableArgs := sqlbase.RowFetcherTableArgs{ + tableArgs := row.FetcherTableArgs{ Desc: desc, Index: index, ColIdxMap: colIdxMap, diff --git a/pkg/sql/distsqlrun/tablereader_test.go b/pkg/sql/distsqlrun/tablereader_test.go index 08877fd9b038..2a1fff067cf5 100644 --- a/pkg/sql/distsqlrun/tablereader_test.go +++ b/pkg/sql/distsqlrun/tablereader_test.go @@ -295,7 +295,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ } // Test that a scan with a limit doesn't touch more ranges than necessary (i.e. 
-// we properly set the limit on the underlying RowFetcher/KVFetcher). +// we properly set the limit on the underlying Fetcher/KVFetcher). func TestLimitScans(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() diff --git a/pkg/sql/distsqlrun/zigzagjoiner.go b/pkg/sql/distsqlrun/zigzagjoiner.go index 07f7fe5c247d..666c262f9f18 100644 --- a/pkg/sql/distsqlrun/zigzagjoiner.go +++ b/pkg/sql/distsqlrun/zigzagjoiner.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -326,7 +327,7 @@ func (z *zigzagJoiner) Start(ctx context.Context) context.Context { // zigzagJoinerInfo contains all the information that needs to be // stored for each side of the join. type zigzagJoinerInfo struct { - fetcher sqlbase.RowFetcher + fetcher row.Fetcher alloc *sqlbase.DatumAlloc table *sqlbase.TableDescriptor index *sqlbase.IndexDescriptor @@ -390,7 +391,7 @@ func (z *zigzagJoiner) setupInfo(spec *ZigzagJoinerSpec, side int, colOffset int // Setup the RowContainers. info.container.Reset() - // Setup the RowFetcher. + // Setup the Fetcher. 
_, _, err := initRowFetcher( &(info.fetcher), info.table, diff --git a/pkg/sql/insert.go b/pkg/sql/insert.go index 0dab272c0297..c5d8ce0b5ff2 100644 --- a/pkg/sql/insert.go +++ b/pkg/sql/insert.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -105,13 +106,13 @@ func (p *planner) Insert( } // Determine what are the foreign key tables that are involved in the update. - var fkCheckType sqlbase.FKCheck + var fkCheckType row.FKCheck if n.OnConflict == nil || n.OnConflict.DoNothing { - fkCheckType = sqlbase.CheckInserts + fkCheckType = row.CheckInserts } else { - fkCheckType = sqlbase.CheckUpdates + fkCheckType = row.CheckUpdates } - fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( ctx, *desc, fkCheckType, @@ -268,8 +269,8 @@ func (p *planner) Insert( } // Create the table insert, which does the bulk of the work. 
- ri, err := sqlbase.MakeRowInserter(p.txn, desc, fkTables, insertCols, - sqlbase.CheckFKs, &p.alloc) + ri, err := row.MakeInserter(p.txn, desc, fkTables, insertCols, + row.CheckFKs, &p.alloc) if err != nil { return nil, err } diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index 5083f57e2025..484ba0cb7f02 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -49,9 +49,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/distsqlrun" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" @@ -1588,7 +1588,7 @@ func (t *logicTest) processSubtest( return errors.Errorf("kv-batch-size needs an integer argument; %s", err) } t.outf("Setting kv batch size %d", batchSize) - defer sqlbase.SetKVBatchSize(int64(batchSize))() + defer row.SetKVBatchSize(int64(batchSize))() default: return errors.Errorf("%s:%d: unknown command: %s", diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 8c6d2cb7ecd0..4f2dd7f27ef3 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/coltypes" "github.com/cockroachdb/cockroach/pkg/sql/opt/xform" "github.com/cockroachdb/cockroach/pkg/sql/parser" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" @@ -395,17 +396,17 @@ func (p *planner) ResolveTableName(ctx context.Context, tn *tree.TableName) erro // TableCollection.getTableVersionByID for how it's used. 
func (p *planner) LookupTableByID( ctx context.Context, tableID sqlbase.ID, -) (sqlbase.TableLookup, error) { +) (row.TableLookup, error) { flags := ObjectLookupFlags{ CommonLookupFlags{txn: p.txn, avoidCached: p.avoidCachedDescriptors}} table, err := p.Tables().getTableVersionByID(ctx, tableID, flags) if err != nil { if err == errTableAdding { - return sqlbase.TableLookup{IsAdding: true}, nil + return row.TableLookup{IsAdding: true}, nil } - return sqlbase.TableLookup{}, err + return row.TableLookup{}, err } - return sqlbase.TableLookup{Table: table}, nil + return row.TableLookup{Table: table}, nil } // TypeAsString enforces (not hints) that the given expression typechecks as a diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index e5600c70e8d2..b2678838842d 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -43,7 +44,7 @@ type SchemaResolver interface { CurrentSearchPath() sessiondata.SearchPath CommonLookupFlags(ctx context.Context, required bool) CommonLookupFlags ObjectLookupFlags(ctx context.Context, required bool) ObjectLookupFlags - LookupTableByID(ctx context.Context, id sqlbase.ID) (sqlbase.TableLookup, error) + LookupTableByID(ctx context.Context, id sqlbase.ID) (row.TableLookup, error) } var _ SchemaResolver = &planner{} diff --git a/pkg/sql/sqlbase/cascader.go b/pkg/sql/row/cascader.go similarity index 82% rename from pkg/sql/sqlbase/cascader.go rename to pkg/sql/row/cascader.go index ed1e52c17b6c..816bff47d1c4 100644 --- a/pkg/sql/sqlbase/cascader.go +++ b/pkg/sql/row/cascader.go @@ -12,7 +12,7 @@ // implied. 
See the License for the specific language governing // permissions and limitations under the License. -package sqlbase +package row import ( "context" @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -31,31 +32,31 @@ import ( type cascader struct { txn *client.Txn tablesByID TableLookupsByID // TablesDescriptors by Table ID - alloc *DatumAlloc + alloc *sqlbase.DatumAlloc evalCtx *tree.EvalContext - indexPKRowFetchers map[ID]map[IndexID]RowFetcher // PK RowFetchers by Table ID and Index ID + indexPKRowFetchers map[ID]map[sqlbase.IndexID]Fetcher // PK RowFetchers by Table ID and Index ID // Row Deleters - rowDeleters map[ID]RowDeleter // RowDeleters by Table ID - deleterRowFetchers map[ID]RowFetcher // RowFetchers for rowDeleters by Table ID - deletedRows map[ID]*RowContainer // Rows that have been deleted by Table ID + rowDeleters map[ID]Deleter // RowDeleters by Table ID + deleterRowFetchers map[ID]Fetcher // RowFetchers for rowDeleters by Table ID + deletedRows map[ID]*sqlbase.RowContainer // Rows that have been deleted by Table ID // Row Updaters - rowUpdaters map[ID]RowUpdater // RowUpdaters by Table ID - updaterRowFetchers map[ID]RowFetcher // RowFetchers for rowUpdaters by Table ID - originalRows map[ID]*RowContainer // Original values for rows that have been updated by Table ID - updatedRows map[ID]*RowContainer // New values for rows that have been updated by Table ID + rowUpdaters map[ID]Updater // RowUpdaters by Table ID + updaterRowFetchers map[ID]Fetcher // RowFetchers for rowUpdaters by Table ID + originalRows map[ID]*sqlbase.RowContainer // Original values for rows that have been updated by Table ID + updatedRows 
map[ID]*sqlbase.RowContainer // New values for rows that have been updated by Table ID } // makeDeleteCascader only creates a cascader if there is a chance that there is // a possible cascade. It returns a cascader if one is required and nil if not. func makeDeleteCascader( txn *client.Txn, - table *TableDescriptor, + table *sqlbase.TableDescriptor, tablesByID TableLookupsByID, evalCtx *tree.EvalContext, - alloc *DatumAlloc, + alloc *sqlbase.DatumAlloc, ) (*cascader, error) { if evalCtx == nil { return nil, pgerror.NewAssertionErrorf("evalContext is nil") @@ -77,9 +78,9 @@ Outer: if err != nil { return nil, err } - if referencingIndex.ForeignKey.OnDelete == ForeignKeyReference_CASCADE || - referencingIndex.ForeignKey.OnDelete == ForeignKeyReference_SET_DEFAULT || - referencingIndex.ForeignKey.OnDelete == ForeignKeyReference_SET_NULL { + if referencingIndex.ForeignKey.OnDelete == sqlbase.ForeignKeyReference_CASCADE || + referencingIndex.ForeignKey.OnDelete == sqlbase.ForeignKeyReference_SET_DEFAULT || + referencingIndex.ForeignKey.OnDelete == sqlbase.ForeignKeyReference_SET_NULL { required = true break Outer } @@ -91,14 +92,14 @@ Outer: return &cascader{ txn: txn, tablesByID: tablesByID, - indexPKRowFetchers: make(map[ID]map[IndexID]RowFetcher), - rowDeleters: make(map[ID]RowDeleter), - deleterRowFetchers: make(map[ID]RowFetcher), - deletedRows: make(map[ID]*RowContainer), - rowUpdaters: make(map[ID]RowUpdater), - updaterRowFetchers: make(map[ID]RowFetcher), - originalRows: make(map[ID]*RowContainer), - updatedRows: make(map[ID]*RowContainer), + indexPKRowFetchers: make(map[ID]map[sqlbase.IndexID]Fetcher), + rowDeleters: make(map[ID]Deleter), + deleterRowFetchers: make(map[ID]Fetcher), + deletedRows: make(map[ID]*sqlbase.RowContainer), + rowUpdaters: make(map[ID]Updater), + updaterRowFetchers: make(map[ID]Fetcher), + originalRows: make(map[ID]*sqlbase.RowContainer), + updatedRows: make(map[ID]*sqlbase.RowContainer), evalCtx: evalCtx, alloc: alloc, }, nil @@ -108,17 
+109,17 @@ Outer: // a possible cascade. It returns a cascader if one is required and nil if not. func makeUpdateCascader( txn *client.Txn, - table *TableDescriptor, + table *sqlbase.TableDescriptor, tablesByID TableLookupsByID, - updateCols []ColumnDescriptor, + updateCols []sqlbase.ColumnDescriptor, evalCtx *tree.EvalContext, - alloc *DatumAlloc, + alloc *sqlbase.DatumAlloc, ) (*cascader, error) { if evalCtx == nil { return nil, pgerror.NewAssertionErrorf("evalContext is nil") } var required bool - colIDs := make(map[ColumnID]struct{}) + colIDs := make(map[sqlbase.ColumnID]struct{}) for _, col := range updateCols { colIDs[col.ID] = struct{}{} } @@ -148,9 +149,9 @@ Outer: if err != nil { return nil, err } - if referencingIndex.ForeignKey.OnUpdate == ForeignKeyReference_CASCADE || - referencingIndex.ForeignKey.OnUpdate == ForeignKeyReference_SET_DEFAULT || - referencingIndex.ForeignKey.OnUpdate == ForeignKeyReference_SET_NULL { + if referencingIndex.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_CASCADE || + referencingIndex.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_SET_DEFAULT || + referencingIndex.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_SET_NULL { required = true break Outer } @@ -162,14 +163,14 @@ Outer: return &cascader{ txn: txn, tablesByID: tablesByID, - indexPKRowFetchers: make(map[ID]map[IndexID]RowFetcher), - rowDeleters: make(map[ID]RowDeleter), - deleterRowFetchers: make(map[ID]RowFetcher), - deletedRows: make(map[ID]*RowContainer), - rowUpdaters: make(map[ID]RowUpdater), - updaterRowFetchers: make(map[ID]RowFetcher), - originalRows: make(map[ID]*RowContainer), - updatedRows: make(map[ID]*RowContainer), + indexPKRowFetchers: make(map[ID]map[sqlbase.IndexID]Fetcher), + rowDeleters: make(map[ID]Deleter), + deleterRowFetchers: make(map[ID]Fetcher), + deletedRows: make(map[ID]*sqlbase.RowContainer), + rowUpdaters: make(map[ID]Updater), + updaterRowFetchers: make(map[ID]Fetcher), + originalRows: make(map[ID]*sqlbase.RowContainer), + 
updatedRows: make(map[ID]*sqlbase.RowContainer), evalCtx: evalCtx, alloc: alloc, }, nil @@ -190,10 +191,10 @@ func (c *cascader) clear(ctx context.Context) { // spanForIndexValues creates a span against an index to extract the primary // keys needed for cascading. func spanForIndexValues( - table *TableDescriptor, - index *IndexDescriptor, + table *sqlbase.TableDescriptor, + index *sqlbase.IndexDescriptor, prefixLen int, - indexColIDs map[ColumnID]int, + indexColIDs map[sqlbase.ColumnID]int, values []tree.Datum, keyPrefix []byte, ) (roachpb.Span, error) { @@ -209,7 +210,7 @@ func spanForIndexValues( if nulls { return roachpb.Span{}, nil } - keyBytes, _, err := EncodePartialIndexKey(table, index, prefixLen, indexColIDs, values, keyPrefix) + keyBytes, _, err := sqlbase.EncodePartialIndexKey(table, index, prefixLen, indexColIDs, values, keyPrefix) if err != nil { return roachpb.Span{}, err } @@ -226,20 +227,20 @@ func spanForIndexValues( // the request to the referencing table. func batchRequestForIndexValues( ctx context.Context, - referencedIndex *IndexDescriptor, - referencingTable *TableDescriptor, - referencingIndex *IndexDescriptor, + referencedIndex *sqlbase.IndexDescriptor, + referencingTable *sqlbase.TableDescriptor, + referencingIndex *sqlbase.IndexDescriptor, values cascadeQueueElement, -) (roachpb.BatchRequest, map[ColumnID]int, error) { +) (roachpb.BatchRequest, map[sqlbase.ColumnID]int, error) { //TODO(bram): consider caching some of these values - keyPrefix := MakeIndexKeyPrefix(referencingTable, referencingIndex.ID) + keyPrefix := sqlbase.MakeIndexKeyPrefix(referencingTable, referencingIndex.ID) prefixLen := len(referencingIndex.ColumnIDs) if len(referencedIndex.ColumnIDs) < prefixLen { prefixLen = len(referencedIndex.ColumnIDs) } - colIDtoRowIndex := make(map[ColumnID]int, len(referencedIndex.ColumnIDs)) + colIDtoRowIndex := make(map[sqlbase.ColumnID]int, len(referencedIndex.ColumnIDs)) for i, referencedColID := range 
referencedIndex.ColumnIDs[:prefixLen] { if found, ok := values.colIDtoRowIndex[referencedColID]; ok { colIDtoRowIndex[referencingIndex.ColumnIDs[i]] = found @@ -273,7 +274,7 @@ func batchRequestForIndexValues( // spanForPKValues creates a span against the primary index of a table and is // used to fetch rows for cascading. func spanForPKValues( - table *TableDescriptor, fetchColIDtoRowIndex map[ColumnID]int, values tree.Datums, + table *sqlbase.TableDescriptor, fetchColIDtoRowIndex map[sqlbase.ColumnID]int, values tree.Datums, ) (roachpb.Span, error) { return spanForIndexValues( table, @@ -281,14 +282,16 @@ func spanForPKValues( len(table.PrimaryIndex.ColumnIDs), fetchColIDtoRowIndex, values, - MakeIndexKeyPrefix(table, table.PrimaryIndex.ID), + sqlbase.MakeIndexKeyPrefix(table, table.PrimaryIndex.ID), ) } // batchRequestForPKValues creates a batch request against the primary index of // a table and is used to fetch rows for cascading. func batchRequestForPKValues( - table *TableDescriptor, fetchColIDtoRowIndex map[ColumnID]int, values *RowContainer, + table *sqlbase.TableDescriptor, + fetchColIDtoRowIndex map[sqlbase.ColumnID]int, + values *sqlbase.RowContainer, ) (roachpb.BatchRequest, error) { var req roachpb.BatchRequest for i := 0; i < values.Len(); i++ { @@ -307,8 +310,8 @@ func batchRequestForPKValues( // fetch the primary keys of the rows that will be affected by a cascading // action. func (c *cascader) addIndexPKRowFetcher( - table *TableDescriptor, index *IndexDescriptor, -) (RowFetcher, error) { + table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, +) (Fetcher, error) { // Is there a cached row fetcher? rowFetchersForTable, exists := c.indexPKRowFetchers[table.ID] if exists { @@ -317,28 +320,28 @@ func (c *cascader) addIndexPKRowFetcher( return rowFetcher, nil } } else { - c.indexPKRowFetchers[table.ID] = make(map[IndexID]RowFetcher) + c.indexPKRowFetchers[table.ID] = make(map[sqlbase.IndexID]Fetcher) } // Create a new row fetcher. 
Only the primary key columns are required. - var colDesc []ColumnDescriptor + var colDesc []sqlbase.ColumnDescriptor for _, id := range table.PrimaryIndex.ColumnIDs { cDesc, err := table.FindColumnByID(id) if err != nil { - return RowFetcher{}, err + return Fetcher{}, err } colDesc = append(colDesc, *cDesc) } var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, len(colDesc)-1) isSecondary := table.PrimaryIndex.ID != index.ID - var rowFetcher RowFetcher + var rowFetcher Fetcher if err := rowFetcher.Init( false, /* reverse */ false, /* returnRangeInfo */ false, /* isCheck */ c.alloc, - RowFetcherTableArgs{ + FetcherTableArgs{ Desc: table, Index: index, ColIdxMap: ColIDtoRowIndexFromCols(colDesc), @@ -347,7 +350,7 @@ func (c *cascader) addIndexPKRowFetcher( ValNeededForCol: valNeededForCol, }, ); err != nil { - return RowFetcher{}, err + return Fetcher{}, err } // Cache the row fetcher. c.indexPKRowFetchers[table.ID][index.ID] = rowFetcher @@ -355,12 +358,12 @@ func (c *cascader) addIndexPKRowFetcher( } // addRowDeleter creates the row deleter and primary index row fetcher. -func (c *cascader) addRowDeleter(table *TableDescriptor) (RowDeleter, RowFetcher, error) { +func (c *cascader) addRowDeleter(table *sqlbase.TableDescriptor) (Deleter, Fetcher, error) { // Is there a cached row fetcher and deleter? 
if rowDeleter, exists := c.rowDeleters[table.ID]; exists { rowFetcher, existsFetcher := c.deleterRowFetchers[table.ID] if !existsFetcher { - return RowDeleter{}, RowFetcher{}, pgerror.NewAssertionErrorf("no corresponding row fetcher for the row deleter for table: (%d)%s", + return Deleter{}, Fetcher{}, pgerror.NewAssertionErrorf("no corresponding row fetcher for the row deleter for table: (%d)%s", table.ID, table.Name, ) } @@ -378,14 +381,14 @@ func (c *cascader) addRowDeleter(table *TableDescriptor) (RowDeleter, RowFetcher c.alloc, ) if err != nil { - return RowDeleter{}, RowFetcher{}, err + return Deleter{}, Fetcher{}, err } // Create the row fetcher that will retrive the rows and columns needed for // deletion. var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, len(rowDeleter.FetchCols)-1) - tableArgs := RowFetcherTableArgs{ + tableArgs := FetcherTableArgs{ Desc: table, Index: &table.PrimaryIndex, ColIdxMap: rowDeleter.FetchColIDtoRowIndex, @@ -393,7 +396,7 @@ func (c *cascader) addRowDeleter(table *TableDescriptor) (RowDeleter, RowFetcher Cols: rowDeleter.FetchCols, ValNeededForCol: valNeededForCol, } - var rowFetcher RowFetcher + var rowFetcher Fetcher if err := rowFetcher.Init( false, /* reverse */ false, /* returnRangeInfo */ @@ -401,7 +404,7 @@ func (c *cascader) addRowDeleter(table *TableDescriptor) (RowDeleter, RowFetcher c.alloc, tableArgs, ); err != nil { - return RowDeleter{}, RowFetcher{}, err + return Deleter{}, Fetcher{}, err } // Cache both the fetcher and deleter. @@ -411,13 +414,13 @@ func (c *cascader) addRowDeleter(table *TableDescriptor) (RowDeleter, RowFetcher } // addRowUpdater creates the row updater and primary index row fetcher. -func (c *cascader) addRowUpdater(table *TableDescriptor) (RowUpdater, RowFetcher, error) { +func (c *cascader) addRowUpdater(table *sqlbase.TableDescriptor) (Updater, Fetcher, error) { // Is there a cached updater? 
rowUpdater, existsUpdater := c.rowUpdaters[table.ID] if existsUpdater { rowFetcher, existsFetcher := c.updaterRowFetchers[table.ID] if !existsFetcher { - return RowUpdater{}, RowFetcher{}, pgerror.NewAssertionErrorf("no corresponding row fetcher for the row updater for table: (%d)%s", + return Updater{}, Fetcher{}, pgerror.NewAssertionErrorf("no corresponding row fetcher for the row updater for table: (%d)%s", table.ID, table.Name, ) } @@ -426,24 +429,24 @@ func (c *cascader) addRowUpdater(table *TableDescriptor) (RowUpdater, RowFetcher // Create the row updater. The row updater requires all the columns in the // table. - rowUpdater, err := makeRowUpdaterWithoutCascader( + rowUpdater, err := makeUpdaterWithoutCascader( c.txn, table, c.tablesByID, table.Columns, nil, /* requestedCol */ - RowUpdaterDefault, + UpdaterDefault, c.alloc, ) if err != nil { - return RowUpdater{}, RowFetcher{}, err + return Updater{}, Fetcher{}, err } // Create the row fetcher that will retrive the rows and columns needed for // deletion. var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, len(rowUpdater.FetchCols)-1) - tableArgs := RowFetcherTableArgs{ + tableArgs := FetcherTableArgs{ Desc: table, Index: &table.PrimaryIndex, ColIdxMap: rowUpdater.FetchColIDtoRowIndex, @@ -451,7 +454,7 @@ func (c *cascader) addRowUpdater(table *TableDescriptor) (RowUpdater, RowFetcher Cols: rowUpdater.FetchCols, ValNeededForCol: valNeededForCol, } - var rowFetcher RowFetcher + var rowFetcher Fetcher if err := rowFetcher.Init( false, /* reverse */ false, /* returnRangeInfo */ @@ -459,7 +462,7 @@ func (c *cascader) addRowUpdater(table *TableDescriptor) (RowUpdater, RowFetcher c.alloc, tableArgs, ); err != nil { - return RowUpdater{}, RowFetcher{}, err + return Updater{}, Fetcher{}, err } // Cache the updater and the fetcher. @@ -473,12 +476,12 @@ func (c *cascader) addRowUpdater(table *TableDescriptor) (RowUpdater, RowFetcher // happens in a single batch. 
func (c *cascader) deleteRows( ctx context.Context, - referencedIndex *IndexDescriptor, - referencingTable *TableDescriptor, - referencingIndex *IndexDescriptor, + referencedIndex *sqlbase.IndexDescriptor, + referencingTable *sqlbase.TableDescriptor, + referencingIndex *sqlbase.IndexDescriptor, values cascadeQueueElement, traceKV bool, -) (*RowContainer, map[ColumnID]int, int, error) { +) (*sqlbase.RowContainer, map[sqlbase.ColumnID]int, int, error) { // Create the span to search for index values. // TODO(bram): This initial index lookup can be skipped if the index is the // primary index. @@ -511,11 +514,11 @@ func (c *cascader) deleteRows( // Fetch all the primary keys that need to be deleted. // TODO(Bram): consider chunking this into n, primary keys, perhaps 100. - pkColTypeInfo, err := makeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex) + pkColTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex) if err != nil { return nil, nil, 0, err } - primaryKeysToDelete := NewRowContainer( + primaryKeysToDelete := sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), pkColTypeInfo, values.originalValues.Len(), ) defer primaryKeysToDelete.Close(ctx) @@ -573,11 +576,11 @@ func (c *cascader) deleteRows( // the queue to avoid having to double the memory used. if _, exists := c.deletedRows[referencingTable.ID]; !exists { // Fetch the rows for deletion and store them in a container. - colTypeInfo, err := makeColTypeInfo(referencingTable, rowDeleter.FetchColIDtoRowIndex) + colTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, rowDeleter.FetchColIDtoRowIndex) if err != nil { return nil, nil, 0, err } - c.deletedRows[referencingTable.ID] = NewRowContainer( + c.deletedRows[referencingTable.ID] = sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, primaryKeysToDelete.Len(), ) } @@ -625,13 +628,13 @@ func (c *cascader) deleteRows( // new values. This update happens in a single batch. 
func (c *cascader) updateRows( ctx context.Context, - referencedIndex *IndexDescriptor, - referencingTable *TableDescriptor, - referencingIndex *IndexDescriptor, + referencedIndex *sqlbase.IndexDescriptor, + referencingTable *sqlbase.TableDescriptor, + referencingIndex *sqlbase.IndexDescriptor, values cascadeQueueElement, - action ForeignKeyReference_Action, + action sqlbase.ForeignKeyReference_Action, traceKV bool, -) (*RowContainer, *RowContainer, map[ColumnID]int, int, error) { +) (*sqlbase.RowContainer, *sqlbase.RowContainer, map[sqlbase.ColumnID]int, int, error) { // Create the span to search for index values. if traceKV { log.VEventf(ctx, 2, "cascading update into table: %d using index: %d", @@ -651,14 +654,14 @@ func (c *cascader) updateRows( // rowContainers for are also used by the queue to avoid having to double the // memory used. if _, exists := c.originalRows[referencingTable.ID]; !exists { - colTypeInfo, err := makeColTypeInfo(referencingTable, rowUpdater.FetchColIDtoRowIndex) + colTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, rowUpdater.FetchColIDtoRowIndex) if err != nil { return nil, nil, nil, 0, err } - c.originalRows[referencingTable.ID] = NewRowContainer( + c.originalRows[referencingTable.ID] = sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, values.originalValues.Len(), ) - c.updatedRows[referencingTable.ID] = NewRowContainer( + c.updatedRows[referencingTable.ID] = sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, values.originalValues.Len(), ) } @@ -671,15 +674,15 @@ func (c *cascader) updateRows( // Populate a map of all columns that need to be set if the action is not // cascade. 
- var referencingIndexValuesByColIDs map[ColumnID]tree.Datum + var referencingIndexValuesByColIDs map[sqlbase.ColumnID]tree.Datum switch action { - case ForeignKeyReference_SET_NULL: - referencingIndexValuesByColIDs = make(map[ColumnID]tree.Datum) + case sqlbase.ForeignKeyReference_SET_NULL: + referencingIndexValuesByColIDs = make(map[sqlbase.ColumnID]tree.Datum) for _, columnID := range referencingIndex.ColumnIDs { referencingIndexValuesByColIDs[columnID] = tree.DNull } - case ForeignKeyReference_SET_DEFAULT: - referencingIndexValuesByColIDs = make(map[ColumnID]tree.Datum) + case sqlbase.ForeignKeyReference_SET_DEFAULT: + referencingIndexValuesByColIDs = make(map[sqlbase.ColumnID]tree.Datum) for _, columnID := range referencingIndex.ColumnIDs { column, err := referencingTable.FindColumnByID(columnID) if err != nil { @@ -740,11 +743,11 @@ func (c *cascader) updateRows( // Fetch all the primary keys for rows that will be updated. // TODO(Bram): consider chunking this into n, primary keys, perhaps 100. - pkColTypeInfo, err := makeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex) + pkColTypeInfo, err := sqlbase.MakeColTypeInfo(referencingTable, indexPKRowFetcherColIDToRowIndex) if err != nil { return nil, nil, nil, 0, err } - primaryKeysToUpdate := NewRowContainer( + primaryKeysToUpdate := sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), pkColTypeInfo, values.originalValues.Len(), ) defer primaryKeysToUpdate.Close(ctx) @@ -805,7 +808,7 @@ func (c *cascader) updateRows( updateRow := make(tree.Datums, len(rowUpdater.updateColIDtoRowIndex)) switch action { - case ForeignKeyReference_CASCADE: + case sqlbase.ForeignKeyReference_CASCADE: // Create the updateRow based on the passed in updated values and from // the retrieved row as a fallback. 
currentUpdatedValue := values.updatedValues.At(i) @@ -818,7 +821,7 @@ func (c *cascader) updateRows( return nil, nil, nil, 0, err } if !column.Nullable { - database, err := GetDatabaseDescFromID(ctx, c.txn, referencingTable.ParentID) + database, err := sqlbase.GetDatabaseDescFromID(ctx, c.txn, referencingTable.ParentID) if err != nil { return nil, nil, nil, 0, err } @@ -837,7 +840,7 @@ func (c *cascader) updateRows( colID, ) } - case ForeignKeyReference_SET_NULL, ForeignKeyReference_SET_DEFAULT: + case sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT: // Create the updateRow based on the original values and for all // values in the index, either nulls (for SET NULL), or default (for // SET DEFAULT). @@ -889,15 +892,15 @@ func (c *cascader) updateRows( } type cascadeQueueElement struct { - table *TableDescriptor + table *sqlbase.TableDescriptor // These row containers are defined elsewhere and their memory is not managed // by the queue. The updated values can be nil for deleted rows. If it does // exist, every row in originalValues must have a corresponding row in // updatedValues at the exact same index. They also must have the exact same // rank. - originalValues *RowContainer - updatedValues *RowContainer - colIDtoRowIndex map[ColumnID]int + originalValues *sqlbase.RowContainer + updatedValues *sqlbase.RowContainer + colIDtoRowIndex map[sqlbase.ColumnID]int startIndex int // Start of the range of rows in the row container. endIndex int // End of the range of rows (exclusive) in the row container. } @@ -911,10 +914,10 @@ type cascadeQueue []cascadeQueueElement // all the rows following that index. 
func (q *cascadeQueue) enqueue( ctx context.Context, - table *TableDescriptor, - originalValues *RowContainer, - updatedValues *RowContainer, - colIDtoRowIndex map[ColumnID]int, + table *sqlbase.TableDescriptor, + originalValues *sqlbase.RowContainer, + updatedValues *sqlbase.RowContainer, + colIDtoRowIndex map[sqlbase.ColumnID]int, startIndex int, ) error { *q = append(*q, cascadeQueueElement{ @@ -941,30 +944,30 @@ func (q *cascadeQueue) dequeue() (cascadeQueueElement, bool) { // remaining indexes to ensure that no orphans were created. func (c *cascader) cascadeAll( ctx context.Context, - table *TableDescriptor, + table *sqlbase.TableDescriptor, originalValues tree.Datums, updatedValues tree.Datums, - colIDtoRowIndex map[ColumnID]int, + colIDtoRowIndex map[sqlbase.ColumnID]int, traceKV bool, ) error { defer c.clear(ctx) var cascadeQ cascadeQueue // Enqueue the first values. - colTypeInfo, err := makeColTypeInfo(table, colIDtoRowIndex) + colTypeInfo, err := sqlbase.MakeColTypeInfo(table, colIDtoRowIndex) if err != nil { return err } - originalRowContainer := NewRowContainer( + originalRowContainer := sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, len(originalValues), ) defer originalRowContainer.Close(ctx) if _, err := originalRowContainer.AddRow(ctx, originalValues); err != nil { return err } - var updatedRowContainer *RowContainer + var updatedRowContainer *sqlbase.RowContainer if updatedValues != nil { - updatedRowContainer = NewRowContainer( + updatedRowContainer = sqlbase.NewRowContainer( c.evalCtx.Mon.MakeBoundAccount(), colTypeInfo, len(updatedValues), ) defer updatedRowContainer.Close(ctx) @@ -980,7 +983,7 @@ func (c *cascader) cascadeAll( for { select { case <-ctx.Done(): - return QueryCanceledError + return sqlbase.QueryCanceledError default: } elem, exists := cascadeQ.dequeue() @@ -1005,7 +1008,7 @@ func (c *cascader) cascadeAll( if elem.updatedValues == nil { // Deleting a row. 
switch referencingIndex.ForeignKey.OnDelete { - case ForeignKeyReference_CASCADE: + case sqlbase.ForeignKeyReference_CASCADE: deletedRows, colIDtoRowIndex, startIndex, err := c.deleteRows( ctx, &referencedIndex, @@ -1030,7 +1033,7 @@ func (c *cascader) cascadeAll( return err } } - case ForeignKeyReference_SET_NULL, ForeignKeyReference_SET_DEFAULT: + case sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT: originalAffectedRows, updatedAffectedRows, colIDtoRowIndex, startIndex, err := c.updateRows( ctx, &referencedIndex, @@ -1060,7 +1063,7 @@ func (c *cascader) cascadeAll( } else { // Updating a row. switch referencingIndex.ForeignKey.OnUpdate { - case ForeignKeyReference_CASCADE, ForeignKeyReference_SET_NULL, ForeignKeyReference_SET_DEFAULT: + case sqlbase.ForeignKeyReference_CASCADE, sqlbase.ForeignKeyReference_SET_NULL, sqlbase.ForeignKeyReference_SET_DEFAULT: originalAffectedRows, updatedAffectedRows, colIDtoRowIndex, startIndex, err := c.updateRows( ctx, &referencedIndex, diff --git a/pkg/sql/row/errors.go b/pkg/sql/row/errors.go new file mode 100644 index 000000000000..afc63c29b365 --- /dev/null +++ b/pkg/sql/row/errors.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package row + +import ( + "context" + "fmt" + "strings" + + "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/util" +) + +// singleKVFetcher is a kvFetcher that returns a single kv. +type singleKVFetcher struct { + kvs [1]roachpb.KeyValue + done bool +} + +// nextBatch implements the kvFetcher interface. +func (f *singleKVFetcher) nextBatch( + _ context.Context, +) (ok bool, kvs []roachpb.KeyValue, batchResponse []byte, numKvs int64, err error) { + if f.done { + return false, nil, nil, 0, nil + } + f.done = true + return true, f.kvs[:], nil, 0, nil +} + +// getRangesInfo implements the kvFetcher interface. +func (f *singleKVFetcher) getRangesInfo() []roachpb.RangeInfo { + panic("getRangesInfo() called on singleKVFetcher") +} + +// ConvertBatchError returns a user friendly constraint violation error. +func ConvertBatchError( + ctx context.Context, tableDesc *sqlbase.TableDescriptor, b *client.Batch, +) error { + origPErr := b.MustPErr() + if origPErr.Index == nil { + return origPErr.GoError() + } + j := origPErr.Index.Index + if j >= int32(len(b.Results)) { + panic(fmt.Sprintf("index %d outside of results: %+v", j, b.Results)) + } + result := b.Results[j] + if cErr, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok && len(result.Rows) > 0 { + key := result.Rows[0].Key + // TODO(dan): There's too much internal knowledge of the sql table + // encoding here (and this callsite is the only reason + // DecodeIndexKeyPrefix is exported). Refactor this bit out. 
+ indexID, _, err := sqlbase.DecodeIndexKeyPrefix(tableDesc, key) + if err != nil { + return err + } + index, err := tableDesc.FindIndexByID(indexID) + if err != nil { + return err + } + var rf Fetcher + + var valNeededForCol util.FastIntSet + valNeededForCol.AddRange(0, len(index.ColumnIDs)-1) + + colIdxMap := make(map[sqlbase.ColumnID]int, len(index.ColumnIDs)) + cols := make([]sqlbase.ColumnDescriptor, len(index.ColumnIDs)) + for i, colID := range index.ColumnIDs { + colIdxMap[colID] = i + col, err := tableDesc.FindColumnByID(colID) + if err != nil { + return err + } + cols[i] = *col + } + + tableArgs := FetcherTableArgs{ + Desc: tableDesc, + Index: index, + ColIdxMap: colIdxMap, + IsSecondaryIndex: indexID != tableDesc.PrimaryIndex.ID, + Cols: cols, + ValNeededForCol: valNeededForCol, + } + if err := rf.Init( + false /* reverse */, false /* returnRangeInfo */, false /* isCheck */, &sqlbase.DatumAlloc{}, tableArgs, + ); err != nil { + return err + } + f := singleKVFetcher{kvs: [1]roachpb.KeyValue{{Key: key}}} + if cErr.ActualValue != nil { + f.kvs[0].Value = *cErr.ActualValue + } + // Use the Fetcher to decode the single kv pair above by passing in + // this singleKVFetcher implementation, which doesn't actually hit KV. + if err := rf.StartScanFrom(ctx, &f); err != nil { + return err + } + datums, _, _, err := rf.NextRowDecoded(ctx) + if err != nil { + return err + } + return NewUniquenessConstraintViolationError(index, datums) + } + return origPErr.GoError() +} + +// NewUniquenessConstraintViolationError creates an error that represents a +// violation of a UNIQUE constraint. 
+func NewUniquenessConstraintViolationError( + index *sqlbase.IndexDescriptor, vals []tree.Datum, +) error { + valStrs := make([]string, 0, len(vals)) + for _, val := range vals { + valStrs = append(valStrs, val.String()) + } + + return pgerror.NewErrorf(pgerror.CodeUniqueViolationError, + "duplicate key value (%s)=(%s) violates unique constraint %q", + strings.Join(index.ColumnNames, ","), + strings.Join(valStrs, ","), + index.Name) +} diff --git a/pkg/sql/sqlbase/rowfetcher.go b/pkg/sql/row/fetcher.go similarity index 85% rename from pkg/sql/sqlbase/rowfetcher.go rename to pkg/sql/row/fetcher.go index cbbb43387850..7d063dbeb968 100644 --- a/pkg/sql/sqlbase/rowfetcher.go +++ b/pkg/sql/row/fetcher.go @@ -12,7 +12,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package sqlbase +package row import ( "bytes" @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -54,8 +55,8 @@ type tableInfo struct { // Used to determine whether a key retrieved belongs to the span we // want to scan. spans roachpb.Spans - desc *TableDescriptor - index *IndexDescriptor + desc *sqlbase.TableDescriptor + index *sqlbase.IndexDescriptor isSecondaryIndex bool indexColumnDirs []encoding.Direction // equivSignature is an equivalence class for each unique table-index @@ -65,7 +66,7 @@ type tableInfo struct { // The table columns to use for fetching, possibly including ones currently in // schema changes. - cols []ColumnDescriptor + cols []sqlbase.ColumnDescriptor // The set of ColumnIDs that are required. 
neededCols util.FastIntSet @@ -80,7 +81,7 @@ type tableInfo struct { neededValueCols int // Map used to get the index for columns in cols. - colIdxMap map[ColumnID]int + colIdxMap map[sqlbase.ColumnID]int // One value per column that is part of the key; each value is a column // index (into cols); -1 if we don't need the value for that column. @@ -88,16 +89,16 @@ type tableInfo struct { // -- Fields updated during a scan -- - keyValTypes []ColumnType - extraTypes []ColumnType - keyVals []EncDatum - extraVals []EncDatum - row EncDatumRow + keyValTypes []sqlbase.ColumnType + extraTypes []sqlbase.ColumnType + keyVals []sqlbase.EncDatum + extraVals []sqlbase.EncDatum + row sqlbase.EncDatumRow decodedRow tree.Datums // The following fields contain MVCC metadata for each row and may be - // returned to users of RowFetcher immediately after NextRow returns. - // They're not important to ordinary consumers of RowFetcher that only + // returned to users of Fetcher immediately after NextRow returns. + // They're not important to ordinary consumers of Fetcher that only // concern themselves with actual SQL row data. // // rowLastModified is the timestamp of the last time any family in the row @@ -114,7 +115,7 @@ type tableInfo struct { hasLast bool // lastDatums is a buffer for the current key. It is only present when // doing a physical check in order to verify round-trip encoding. - // It is required because RowFetcher.kv is overwritten before NextRow + // It is required because Fetcher.kv is overwritten before NextRow // returns. lastKV roachpb.KeyValue // lastDatums is a buffer for the previously scanned k/v datums. It is @@ -123,27 +124,27 @@ type tableInfo struct { lastDatums tree.Datums } -// RowFetcherTableArgs are the arguments passed to RowFetcher.Init +// FetcherTableArgs are the arguments passed to Fetcher.Init // for a given table that includes descriptors and row information. 
-type RowFetcherTableArgs struct { - // The spans of keys to return for the given table. RowFetcher +type FetcherTableArgs struct { + // The spans of keys to return for the given table. Fetcher // ignores keys outside these spans. - // This is irrelevant if RowFetcher is initialize with only one + // This is irrelevant if Fetcher is initialize with only one // table. Spans roachpb.Spans - Desc *TableDescriptor - Index *IndexDescriptor - ColIdxMap map[ColumnID]int + Desc *sqlbase.TableDescriptor + Index *sqlbase.IndexDescriptor + ColIdxMap map[sqlbase.ColumnID]int IsSecondaryIndex bool - Cols []ColumnDescriptor + Cols []sqlbase.ColumnDescriptor // The indexes (0 to # of columns - 1) of the columns to return. ValNeededForCol util.FastIntSet } -// RowFetcher handles fetching kvs and forming table rows for an +// Fetcher handles fetching kvs and forming table rows for an // arbitrary number of tables. // Usage: -// var rf RowFetcher +// var rf Fetcher // err := rf.Init(..) // // Handle err // err := rf.StartScan(..) @@ -157,7 +158,7 @@ type RowFetcherTableArgs struct { // } // // Process res.row // } -type RowFetcher struct { +type Fetcher struct { // tables is a slice of all the tables and their descriptors for which // rows are returned. tables []tableInfo @@ -188,7 +189,7 @@ type RowFetcher struct { mustDecodeIndexKey bool // knownPrefixLength is the number of bytes in the index key prefix this - // RowFetcher is configured for. The index key prefix is the table id, index + // Fetcher is configured for. The index key prefix is the table id, index // id pair at the start of the key. knownPrefixLength int @@ -232,25 +233,28 @@ type RowFetcher struct { isCheck bool // Buffered allocation of decoded datums. 
- alloc *DatumAlloc + alloc *sqlbase.DatumAlloc } -// Reset resets this RowFetcher, preserving the memory capacity that was used +// Reset resets this Fetcher, preserving the memory capacity that was used // for the tables slice, and the slices within each of the tableInfo objects // within tables. This permits reuse of this objects without forcing total // reallocation of all of those slice fields. -func (rf *RowFetcher) Reset() { - *rf = RowFetcher{ +func (rf *Fetcher) Reset() { + *rf = Fetcher{ tables: rf.tables[:0], } } -// Init sets up a RowFetcher for a given table and index. If we are using a +// Init sets up a Fetcher for a given table and index. If we are using a // non-primary index, tables.ValNeededForCol can only refer to columns in the // index. -func (rf *RowFetcher) Init( +func (rf *Fetcher) Init( reverse, - returnRangeInfo bool, isCheck bool, alloc *DatumAlloc, tables ...RowFetcherTableArgs, + returnRangeInfo bool, + isCheck bool, + alloc *sqlbase.DatumAlloc, + tables ...FetcherTableArgs, ) error { if len(tables) == 0 { panic("no tables to fetch from") @@ -285,7 +289,7 @@ func (rf *RowFetcher) Init( index: tableArgs.Index, isSecondaryIndex: tableArgs.IsSecondaryIndex, cols: tableArgs.Cols, - row: make(EncDatumRow, len(tableArgs.Cols)), + row: make(sqlbase.EncDatumRow, len(tableArgs.Cols)), decodedRow: make(tree.Datums, len(tableArgs.Cols)), // These slice fields might get re-allocated below, so reslice them from @@ -298,7 +302,7 @@ func (rf *RowFetcher) Init( var err error if multipleTables { // We produce references to every signature's reference. 
- equivSignatures, err := TableEquivSignatures(table.desc, table.index) + equivSignatures, err := sqlbase.TableEquivSignatures(table.desc, table.index) if err != nil { return err } @@ -335,9 +339,9 @@ func (rf *RowFetcher) Init( } } - rf.knownPrefixLength = len(MakeIndexKeyPrefix(table.desc, table.index.ID)) + rf.knownPrefixLength = len(sqlbase.MakeIndexKeyPrefix(table.desc, table.index.ID)) - var indexColumnIDs []ColumnID + var indexColumnIDs []sqlbase.ColumnID indexColumnIDs, table.indexColumnDirs = table.index.FullColumnIDs() table.neededValueColsByIdx = tableArgs.ValNeededForCol.Copy() @@ -389,14 +393,14 @@ func (rf *RowFetcher) Init( } // Prepare our index key vals slice. - table.keyValTypes, err = GetColumnTypes(table.desc, indexColumnIDs) + table.keyValTypes, err = sqlbase.GetColumnTypes(table.desc, indexColumnIDs) if err != nil { return err } if cap(table.keyVals) >= nIndexCols { table.keyVals = table.keyVals[:nIndexCols] } else { - table.keyVals = make([]EncDatum, nIndexCols) + table.keyVals = make([]sqlbase.EncDatum, nIndexCols) } if hasExtraCols(&table) { @@ -405,12 +409,12 @@ func (rf *RowFetcher) Init( // Primary indexes only contain ascendingly-encoded // values. If this ever changes, we'll probably have to // figure out the directions here too. - table.extraTypes, err = GetColumnTypes(table.desc, table.index.ExtraColumnIDs) + table.extraTypes, err = sqlbase.GetColumnTypes(table.desc, table.index.ExtraColumnIDs) nExtraColumns := len(table.index.ExtraColumnIDs) if cap(table.extraVals) >= nExtraColumns { table.extraVals = table.extraVals[:nExtraColumns] } else { - table.extraVals = make([]EncDatum, nExtraColumns) + table.extraVals = make([]sqlbase.EncDatum, nExtraColumns) } if err != nil { return err @@ -439,7 +443,7 @@ func (rf *RowFetcher) Init( // StartScan initializes and starts the key-value scan. Can be used multiple // times. 
-func (rf *RowFetcher) StartScan( +func (rf *Fetcher) StartScan( ctx context.Context, txn *client.Txn, spans roachpb.Spans, @@ -476,7 +480,7 @@ func (rf *RowFetcher) StartScan( // StartScanFrom initializes and starts a scan from the given kvFetcher. Can be // used multiple times. -func (rf *RowFetcher) StartScanFrom(ctx context.Context, f kvFetcher) error { +func (rf *Fetcher) StartScanFrom(ctx context.Context, f kvFetcher) error { rf.indexKey = nil rf.kvFetcher = f rf.kvs = nil @@ -491,7 +495,7 @@ func (rf *RowFetcher) StartScanFrom(ctx context.Context, f kvFetcher) error { // the next batch until there are no more kvs to fetch. // Returns whether or not there are more kvs to fetch, the kv that was fetched, // and any errors that may have occurred. -func (rf *RowFetcher) nextKV(ctx context.Context) (ok bool, kv roachpb.KeyValue, err error) { +func (rf *Fetcher) nextKV(ctx context.Context) (ok bool, kv roachpb.KeyValue, err error) { if len(rf.kvs) != 0 { kv = rf.kvs[0] rf.kvs = rf.kvs[1:] @@ -530,7 +534,7 @@ func (rf *RowFetcher) nextKV(ctx context.Context) (ok bool, kv roachpb.KeyValue, // NextKey retrieves the next key/value and sets kv/kvEnd. Returns whether a row // has been completed. -func (rf *RowFetcher) NextKey(ctx context.Context) (rowDone bool, err error) { +func (rf *Fetcher) NextKey(ctx context.Context) (rowDone bool, err error) { var ok bool for { @@ -613,7 +617,7 @@ func (rf *RowFetcher) NextKey(ctx context.Context) (rowDone bool, err error) { } } -func (rf *RowFetcher) prettyEncDatums(types []ColumnType, vals []EncDatum) string { +func (rf *Fetcher) prettyEncDatums(types []sqlbase.ColumnType, vals []sqlbase.EncDatum) string { var buf bytes.Buffer for i, v := range vals { if err := v.EnsureDecoded(&types[i], rf.alloc); err != nil { @@ -626,12 +630,12 @@ func (rf *RowFetcher) prettyEncDatums(types []ColumnType, vals []EncDatum) strin // ReadIndexKey decodes an index key for a given table. 
// It returns whether or not the key is for any of the tables initialized -// in RowFetcher, and the remaining part of the key if it is. -func (rf *RowFetcher) ReadIndexKey(key roachpb.Key) (remaining []byte, ok bool, err error) { +// in Fetcher, and the remaining part of the key if it is. +func (rf *Fetcher) ReadIndexKey(key roachpb.Key) (remaining []byte, ok bool, err error) { // If there is only one table to check keys for, there is no need // to go through the equivalence signature checks. if len(rf.tables) == 1 { - return DecodeIndexKeyWithoutTableIDIndexIDPrefix( + return sqlbase.DecodeIndexKeyWithoutTableIDIndexIDPrefix( rf.currentTable.desc, rf.currentTable.index, rf.currentTable.keyValTypes, @@ -647,7 +651,7 @@ func (rf *RowFetcher) ReadIndexKey(key roachpb.Key) (remaining []byte, ok bool, // key now contains the bytes in the key (if match) that are not part // of the signature in order. - tableIdx, key, match, err := IndexKeyEquivSignature(key, rf.allEquivSignatures, rf.keySigBuf, rf.keyRestBuf) + tableIdx, key, match, err := sqlbase.IndexKeyEquivSignature(key, rf.allEquivSignatures, rf.keySigBuf, rf.keyRestBuf) if err != nil { return nil, false, err } @@ -688,7 +692,7 @@ func (rf *RowFetcher) ReadIndexKey(key roachpb.Key) (remaining []byte, ok bool, // We can simply decode all the column values we retrieved // when processing the index key. The column values are at the // front of the key. - if key, err = DecodeKeyVals( + if key, err = sqlbase.DecodeKeyVals( rf.currentTable.keyValTypes, rf.currentTable.keyVals, rf.currentTable.indexColumnDirs, @@ -703,7 +707,7 @@ func (rf *RowFetcher) ReadIndexKey(key roachpb.Key) (remaining []byte, ok bool, // processKV processes the given key/value, setting values in the row // accordingly. If debugStrings is true, returns pretty printed key and value // information in prettyKey/prettyValue (otherwise they are empty strings). 
-func (rf *RowFetcher) processKV( +func (rf *Fetcher) processKV( ctx context.Context, kv roachpb.KeyValue, ) (prettyKey string, prettyValue string, err error) { table := rf.currentTable @@ -790,8 +794,8 @@ func (rf *RowFetcher) processKV( return "", "", scrub.WrapError(scrub.IndexKeyDecodingError, err) } - var family *ColumnFamilyDescriptor - family, err = table.desc.FindFamilyByID(FamilyID(familyID)) + var family *sqlbase.ColumnFamilyDescriptor + family, err = table.desc.FindFamilyByID(sqlbase.FamilyID(familyID)) if err != nil { return "", "", scrub.WrapError(scrub.IndexKeyDecodingError, err) } @@ -811,7 +815,7 @@ func (rf *RowFetcher) processKV( // This is a unique secondary index; decode the extra // column values from the value. var err error - valueBytes, err = DecodeKeyVals( + valueBytes, err = sqlbase.DecodeKeyVals( table.extraTypes, table.extraVals, nil, @@ -858,10 +862,10 @@ func (rf *RowFetcher) processKV( // processValueSingle processes the given value (of column // family.DefaultColumnID), setting values in table.row accordingly. The key is // only used for logging. -func (rf *RowFetcher) processValueSingle( +func (rf *Fetcher) processValueSingle( ctx context.Context, table *tableInfo, - family *ColumnFamilyDescriptor, + family *sqlbase.ColumnFamilyDescriptor, kv roachpb.KeyValue, prettyKeyPrefix string, ) (prettyKey string, prettyValue string, err error) { @@ -892,14 +896,14 @@ func (rf *RowFetcher) processValueSingle( // although that would require changing UnmarshalColumnValue to operate // on bytes, and for Encode/DecodeTableValue to operate on marshaled // single values. 
- value, err := UnmarshalColumnValue(rf.alloc, typ, kv.Value) + value, err := sqlbase.UnmarshalColumnValue(rf.alloc, typ, kv.Value) if err != nil { return "", "", err } if rf.traceKV { prettyValue = value.String() } - table.row[idx] = DatumToEncDatum(typ, value) + table.row[idx] = sqlbase.DatumToEncDatum(typ, value) if debugRowFetch { log.Infof(ctx, "Scan %s -> %v", kv.Key, value) } @@ -915,7 +919,7 @@ func (rf *RowFetcher) processValueSingle( return prettyKey, prettyValue, nil } -func (rf *RowFetcher) processValueBytes( +func (rf *Fetcher) processValueBytes( ctx context.Context, table *tableInfo, kv roachpb.KeyValue, @@ -931,7 +935,7 @@ func (rf *RowFetcher) processValueBytes( } var colIDDiff uint32 - var lastColID ColumnID + var lastColID sqlbase.ColumnID var typeOffset, dataOffset int var typ encoding.Type for len(valueBytes) > 0 && rf.valueColsFound < table.neededValueCols { @@ -939,7 +943,7 @@ func (rf *RowFetcher) processValueBytes( if err != nil { return "", "", err } - colID := lastColID + ColumnID(colIDDiff) + colID := lastColID + sqlbase.ColumnID(colIDDiff) lastColID = colID if !table.neededCols.Contains(int(colID)) { // This column wasn't requested, so read its length and skip it. @@ -959,8 +963,8 @@ func (rf *RowFetcher) processValueBytes( prettyKey = fmt.Sprintf("%s/%s", prettyKey, table.desc.Columns[idx].Name) } - var encValue EncDatum - encValue, valueBytes, err = EncDatumValueFromBufferWithOffsetsAndType(valueBytes, typeOffset, + var encValue sqlbase.EncDatum + encValue, valueBytes, err = sqlbase.EncDatumValueFromBufferWithOffsetsAndType(valueBytes, typeOffset, dataOffset, typ) if err != nil { return "", "", err @@ -986,7 +990,7 @@ func (rf *RowFetcher) processValueBytes( // processValueTuple processes the given values (of columns family.ColumnIDs), // setting values in the rf.row accordingly. The key is only used for logging. 
-func (rf *RowFetcher) processValueTuple( +func (rf *Fetcher) processValueTuple( ctx context.Context, table *tableInfo, kv roachpb.KeyValue, prettyKeyPrefix string, ) (prettyKey string, prettyValue string, err error) { tupleBytes, err := kv.Value.GetTuple() @@ -1004,9 +1008,14 @@ func (rf *RowFetcher) processValueTuple( // be a scrub.ScrubError, which the caller is responsible for unwrapping. // It also returns the table and index descriptor associated with the row // (relevant when more than one table is specified during initialization). -func (rf *RowFetcher) NextRow( +func (rf *Fetcher) NextRow( ctx context.Context, -) (row EncDatumRow, table *TableDescriptor, index *IndexDescriptor, err error) { +) ( + row sqlbase.EncDatumRow, + table *sqlbase.TableDescriptor, + index *sqlbase.IndexDescriptor, + err error, +) { if rf.kvEnd { return nil, nil, nil, nil } @@ -1045,9 +1054,14 @@ func (rf *RowFetcher) NextRow( // When there are no more rows, the Datums is nil. // It also returns the table and index descriptor associated with the row // (relevant when more than one table is specified during initialization). -func (rf *RowFetcher) NextRowDecoded( +func (rf *Fetcher) NextRowDecoded( ctx context.Context, -) (datums tree.Datums, table *TableDescriptor, index *IndexDescriptor, err error) { +) ( + datums tree.Datums, + table *sqlbase.TableDescriptor, + index *sqlbase.IndexDescriptor, + err error, +) { row, table, index, err := rf.NextRow(ctx) if err != nil { err = scrub.UnwrapScrubError(err) @@ -1073,7 +1087,7 @@ func (rf *RowFetcher) NextRowDecoded( // RowLastModified may only be called after NextRow has returned a non-nil row // and returns the timestamp of the last modification to that row. 
-func (rf *RowFetcher) RowLastModified() hlc.Timestamp { +func (rf *Fetcher) RowLastModified() hlc.Timestamp { return rf.rowReadyTable.rowLastModified } @@ -1081,7 +1095,7 @@ func (rf *RowFetcher) RowLastModified() hlc.Timestamp { // returns true if that row was most recently deleted. This method is only // meaningful when the configured kvFetcher returns deletion tombstones, which // the normal one (via `StartScan`) does not. -func (rf *RowFetcher) RowIsDeleted() bool { +func (rf *Fetcher) RowIsDeleted() bool { return rf.rowReadyTable.rowIsDeleted } @@ -1094,7 +1108,7 @@ func (rf *RowFetcher) RowIsDeleted() bool { // - There is no extra unexpected or incorrect data encoded in the k/v // pair. // - Decoded keys follow the same ordering as their encoding. -func (rf *RowFetcher) NextRowWithErrors(ctx context.Context) (EncDatumRow, error) { +func (rf *Fetcher) NextRowWithErrors(ctx context.Context) (sqlbase.EncDatumRow, error) { row, table, index, err := rf.NextRow(ctx) if row == nil { return nil, nil @@ -1139,10 +1153,10 @@ func (rf *RowFetcher) NextRowWithErrors(ctx context.Context) (EncDatumRow, error // checkPrimaryIndexDatumEncodings will run a round-trip encoding check // on all values in the buffered row. This check is specific to primary // index datums. 
-func (rf *RowFetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error { +func (rf *Fetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error { table := rf.rowReadyTable scratch := make([]byte, 1024) - colIDToColumn := make(map[ColumnID]ColumnDescriptor) + colIDToColumn := make(map[sqlbase.ColumnID]sqlbase.ColumnDescriptor) for _, col := range table.desc.Columns { colIDToColumn[col.ID] = col } @@ -1150,7 +1164,7 @@ func (rf *RowFetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error rh := rowHelper{TableDesc: table.desc, Indexes: table.desc.Indexes} for _, family := range table.desc.Families { - var lastColID ColumnID + var lastColID sqlbase.ColumnID familySortedColumnIDs, ok := rh.sortedColumnFamily(family.ID) if !ok { panic("invalid family sorted column id map") @@ -1178,14 +1192,14 @@ func (rf *RowFetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error colIDDiff := col.ID - lastColID lastColID = col.ID - if result, err := EncodeTableValue([]byte(nil), colIDDiff, rowVal.Datum, + if result, err := sqlbase.EncodeTableValue([]byte(nil), colIDDiff, rowVal.Datum, scratch); err != nil { log.Errorf(ctx, "Could not re-encode column %s, value was %#v. Got error %s", col.Name, rowVal.Datum, err) - } else if !bytes.Equal(result, rowVal.encoded) { + } else if !rowVal.BytesEqual(result) { return scrub.WrapError(scrub.IndexValueDecodingError, errors.Errorf( "value failed to round-trip encode. Column=%s colIDDiff=%d Key=%s expected %#v, got: %#v", - col.Name, colIDDiff, rf.kv.Key, rowVal.encoded, result)) + col.Name, colIDDiff, rf.kv.Key, rowVal.EncodedString(), result)) } } } @@ -1195,16 +1209,16 @@ func (rf *RowFetcher) checkPrimaryIndexDatumEncodings(ctx context.Context) error // checkSecondaryIndexDatumEncodings will run a round-trip encoding // check on all values in the buffered row. This check is specific to // secondary index datums. 
-func (rf *RowFetcher) checkSecondaryIndexDatumEncodings(ctx context.Context) error { +func (rf *Fetcher) checkSecondaryIndexDatumEncodings(ctx context.Context) error { table := rf.rowReadyTable - colToEncDatum := make(map[ColumnID]EncDatum, len(table.row)) + colToEncDatum := make(map[sqlbase.ColumnID]sqlbase.EncDatum, len(table.row)) values := make(tree.Datums, len(table.row)) for i, col := range table.cols { colToEncDatum[col.ID] = table.row[i] values[i] = table.row[i].Datum } - indexEntries, err := EncodeSecondaryIndex(table.desc, table.index, table.colIdxMap, values) + indexEntries, err := sqlbase.EncodeSecondaryIndex(table.desc, table.index, table.colIdxMap, values) if err != nil { return err } @@ -1227,7 +1241,7 @@ func (rf *RowFetcher) checkSecondaryIndexDatumEncodings(ctx context.Context) err // checkKeyOrdering verifies that the datums decoded for the current key // have the same ordering as the encoded key. -func (rf *RowFetcher) checkKeyOrdering(ctx context.Context) error { +func (rf *Fetcher) checkKeyOrdering(ctx context.Context) error { defer func() { rf.rowReadyTable.lastDatums = append(tree.Datums(nil), rf.rowReadyTable.decodedRow...) 
}() @@ -1242,14 +1256,14 @@ func (rf *RowFetcher) checkKeyOrdering(ctx context.Context) error { idx := rf.rowReadyTable.colIdxMap[id] result := rf.rowReadyTable.decodedRow[idx].Compare(&evalCtx, rf.rowReadyTable.lastDatums[idx]) expectedDirection := rf.rowReadyTable.index.ColumnDirections[i] - if rf.reverse && expectedDirection == IndexDescriptor_ASC { - expectedDirection = IndexDescriptor_DESC - } else if rf.reverse && expectedDirection == IndexDescriptor_DESC { - expectedDirection = IndexDescriptor_ASC + if rf.reverse && expectedDirection == sqlbase.IndexDescriptor_ASC { + expectedDirection = sqlbase.IndexDescriptor_DESC + } else if rf.reverse && expectedDirection == sqlbase.IndexDescriptor_DESC { + expectedDirection = sqlbase.IndexDescriptor_ASC } - if expectedDirection == IndexDescriptor_ASC && result < 0 || - expectedDirection == IndexDescriptor_DESC && result > 0 { + if expectedDirection == sqlbase.IndexDescriptor_ASC && result < 0 || + expectedDirection == sqlbase.IndexDescriptor_DESC && result > 0 { return scrub.WrapError(scrub.IndexKeyDecodingError, errors.Errorf("key ordering did not match datum ordering. IndexDescriptor=%s", expectedDirection)) @@ -1258,7 +1272,7 @@ func (rf *RowFetcher) checkKeyOrdering(ctx context.Context) error { return nil } -func (rf *RowFetcher) finalizeRow() error { +func (rf *Fetcher) finalizeRow() error { table := rf.rowReadyTable // Fill in any missing values with NULLs for i := range table.cols { @@ -1289,7 +1303,7 @@ func (rf *RowFetcher) finalizeRow() error { table.desc.Name, table.cols[i].Name, table.index.Name, strings.Join(table.index.ColumnNames, ","), strings.Join(indexColValues, ","))) } - table.row[i] = EncDatum{ + table.row[i] = sqlbase.EncDatum{ Datum: tree.DNull, } // We've set valueColsFound to the number of present columns in the row @@ -1304,14 +1318,14 @@ func (rf *RowFetcher) finalizeRow() error { // Key returns the next key (the key that follows the last returned row). 
// Key returns nil when there are no more rows. -func (rf *RowFetcher) Key() roachpb.Key { +func (rf *Fetcher) Key() roachpb.Key { return rf.kv.Key } // PartialKey returns a partial slice of the next key (the key that follows the // last returned row) containing nCols columns, without the ending column // family. Returns nil when there are no more rows. -func (rf *RowFetcher) PartialKey(nCols int) (roachpb.Key, error) { +func (rf *Fetcher) PartialKey(nCols int) (roachpb.Key, error) { if rf.kv.Key == nil { return nil, nil } @@ -1325,7 +1339,7 @@ func (rf *RowFetcher) PartialKey(nCols int) (roachpb.Key, error) { // GetRangeInfo returns information about the ranges where the rows came from. // The RangeInfo's are deduped and not ordered. -func (rf *RowFetcher) GetRangeInfo() []roachpb.RangeInfo { +func (rf *Fetcher) GetRangeInfo() []roachpb.RangeInfo { return rf.kvFetcher.getRangesInfo() } @@ -1334,3 +1348,60 @@ func (rf *RowFetcher) GetRangeInfo() []roachpb.RangeInfo { func hasExtraCols(table *tableInfo) bool { return table.isSecondaryIndex && table.index.Unique } + +// consumeIndexKeyWithoutTableIDIndexIDPrefix consumes an index key that's +// already pre-stripped of its table ID index ID prefix, up to nCols columns, +// returning the number of bytes consumed. For example, given an input key +// with values (6,7,8,9) such as /Table/60/1/6/7/#/61/1/8/9, stripping 3 columns +// from this key would eat all but the final, 4th column 9 in this example, +// producing /Table/60/1/6/7/#/61/1/8. If nCols was 2, instead, the result +// would include the trailing table ID index ID pair, since that's a more +// precise key: /Table/60/1/6/7/#/61/1. +func consumeIndexKeyWithoutTableIDIndexIDPrefix( + index *sqlbase.IndexDescriptor, nCols int, key []byte, +) (int, error) { + origKeyLen := len(key) + consumedCols := 0 + for _, ancestor := range index.Interleave.Ancestors { + length := int(ancestor.SharedPrefixLen) + // Skip up to length values. 
+ for j := 0; j < length; j++ { + if consumedCols == nCols { + // We're done early, in the middle of an interleave. + return origKeyLen - len(key), nil + } + l, err := encoding.PeekLength(key) + if err != nil { + return 0, err + } + key = key[l:] + consumedCols++ + } + var ok bool + key, ok = encoding.DecodeIfInterleavedSentinel(key) + if !ok { + return 0, errors.New("unexpected lack of sentinel key") + } + + // Skip the TableID/IndexID pair for each ancestor except for the + // first, which has already been skipped in our input. + for j := 0; j < 2; j++ { + idLen, err := encoding.PeekLength(key) + if err != nil { + return 0, err + } + key = key[idLen:] + } + } + + // Decode the remaining values in the key, in the final interleave. + for ; consumedCols < nCols; consumedCols++ { + l, err := encoding.PeekLength(key) + if err != nil { + return 0, err + } + key = key[l:] + } + + return origKeyLen - len(key), nil +} diff --git a/pkg/sql/sqlbase/rowfetcher_mvcc_test.go b/pkg/sql/row/fetcher_mvcc_test.go similarity index 96% rename from pkg/sql/sqlbase/rowfetcher_mvcc_test.go rename to pkg/sql/row/fetcher_mvcc_test.go index e8b4323514f0..97f1a99b1110 100644 --- a/pkg/sql/sqlbase/rowfetcher_mvcc_test.go +++ b/pkg/sql/row/fetcher_mvcc_test.go @@ -12,7 +12,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package sqlbase_test +package row_test import ( "context" @@ -20,17 +20,17 @@ import ( "reflect" "testing" + "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/engine" - "github.com/cockroachdb/cockroach/pkg/util" - - "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -78,7 +78,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { parentDesc := sqlbase.GetTableDescriptor(kvDB, `d`, `parent`) childDesc := sqlbase.GetTableDescriptor(kvDB, `d`, `child`) - var args []sqlbase.RowFetcherTableArgs + var args []row.FetcherTableArgs for _, desc := range []*sqlbase.TableDescriptor{parentDesc, childDesc} { colIdxMap := make(map[sqlbase.ColumnID]int) var valNeededForCol util.FastIntSet @@ -86,7 +86,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { colIdxMap[col.ID] = colIdx valNeededForCol.Add(colIdx) } - args = append(args, sqlbase.RowFetcherTableArgs{ + args = append(args, row.FetcherTableArgs{ Spans: desc.AllIndexSpans(), Desc: desc, Index: &desc.PrimaryIndex, @@ -96,7 +96,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { ValNeededForCol: valNeededForCol, }) } - var rf sqlbase.RowFetcher + var rf row.Fetcher if err := rf.Init( false /* reverse */, false /* returnRangeInfo */, true /* isCheck */, &sqlbase.DatumAlloc{}, args..., @@ -113,7 +113,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { log.Info(ctx, kv.Key, kv.Value.Timestamp, kv.Value.PrettyPrint()) } - if err := 
rf.StartScanFrom(ctx, &sqlbase.SpanKVFetcher{KVs: kvs}); err != nil { + if err := rf.StartScanFrom(ctx, &row.SpanKVFetcher{KVs: kvs}); err != nil { t.Fatal(err) } var rows []rowWithMVCCMetadata diff --git a/pkg/sql/sqlbase/rowfetcher_test.go b/pkg/sql/row/fetcher_test.go similarity index 95% rename from pkg/sql/sqlbase/rowfetcher_test.go rename to pkg/sql/row/fetcher_test.go index e28a51f4e2e3..3cab7867d25b 100644 --- a/pkg/sql/sqlbase/rowfetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -12,7 +12,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package sqlbase +package row import ( "context" @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util" @@ -34,17 +35,17 @@ import ( ) type initFetcherArgs struct { - tableDesc *TableDescriptor + tableDesc *sqlbase.TableDescriptor indexIdx int valNeededForCol util.FastIntSet spans roachpb.Spans } -func makeFetcherArgs(entries []initFetcherArgs) []RowFetcherTableArgs { - fetcherArgs := make([]RowFetcherTableArgs, len(entries)) +func makeFetcherArgs(entries []initFetcherArgs) []FetcherTableArgs { + fetcherArgs := make([]FetcherTableArgs, len(entries)) for i, entry := range entries { - var index *IndexDescriptor + var index *sqlbase.IndexDescriptor var isSecondaryIndex bool if entry.indexIdx > 0 { @@ -54,7 +55,7 @@ func makeFetcherArgs(entries []initFetcherArgs) []RowFetcherTableArgs { index = &entry.tableDesc.PrimaryIndex } - fetcherArgs[i] = RowFetcherTableArgs{ + fetcherArgs[i] = FetcherTableArgs{ Spans: entry.spans, Desc: entry.tableDesc, Index: index, @@ -68,9 +69,9 @@ func makeFetcherArgs(entries 
[]initFetcherArgs) []RowFetcherTableArgs { } func initFetcher( - entries []initFetcherArgs, reverseScan bool, alloc *DatumAlloc, -) (fetcher *RowFetcher, err error) { - fetcher = &RowFetcher{} + entries []initFetcherArgs, reverseScan bool, alloc *sqlbase.DatumAlloc, +) (fetcher *Fetcher, err error) { + fetcher = &Fetcher{} fetcherArgs := makeFetcherArgs(entries) @@ -137,12 +138,12 @@ func TestNextRowSingle(t *testing.T) { ) } - alloc := &DatumAlloc{} + alloc := &sqlbase.DatumAlloc{} // We try to read rows from each table. for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -257,12 +258,12 @@ func TestNextRowBatchLimiting(t *testing.T) { ) } - alloc := &DatumAlloc{} + alloc := &sqlbase.DatumAlloc{} // We try to read rows from each table. for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -372,9 +373,9 @@ INDEX(c) ), ) - alloc := &DatumAlloc{} + alloc := &sqlbase.DatumAlloc{} - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -537,11 +538,11 @@ func TestNextRowSecondaryIndex(t *testing.T) { table.nRows += nNulls } - alloc := &DatumAlloc{} + alloc := &sqlbase.DatumAlloc{} // We try to read rows from each index. 
for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nVals-1) @@ -867,7 +868,7 @@ func TestNextRowInterleaved(t *testing.T) { } } - alloc := &DatumAlloc{} + alloc := &sqlbase.DatumAlloc{} // Retrieve rows from every non-empty subset of the tables/indexes. for _, idxs := range generateIdxSubsets(len(interleaveEntries)-1, nil) { // Initialize our subset of tables/indexes. @@ -893,8 +894,8 @@ func TestNextRowInterleaved(t *testing.T) { // RowFetcher. idLookups := make(map[uint64]*fetcherEntryArgs, len(entries)) for i, entry := range entries { - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, entry.tableName) - var indexID IndexID + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, entry.tableName) + var indexID sqlbase.IndexID if entry.indexIdx == 0 { indexID = tableDesc.PrimaryIndex.ID } else { @@ -1012,7 +1013,7 @@ func TestRowFetcherReset(t *testing.T) { 0, sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(1)), ) - tableDesc := GetTableDescriptor(kvDB, sqlutils.TestDB, "foo") + tableDesc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "foo") var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, 1) args := []initFetcherArgs{ @@ -1022,7 +1023,7 @@ func TestRowFetcherReset(t *testing.T) { valNeededForCol: valNeededForCol, }, } - da := DatumAlloc{} + da := sqlbase.DatumAlloc{} fetcher, err := initFetcher(args, false, &da) if err != nil { t.Fatal(err) @@ -1053,6 +1054,6 @@ func TestRowFetcherReset(t *testing.T) { } -func idLookupKey(tableID ID, indexID IndexID) uint64 { +func idLookupKey(tableID ID, indexID sqlbase.IndexID) uint64 { return (uint64(tableID) << 32) | uint64(indexID) } diff --git a/pkg/sql/sqlbase/fk.go b/pkg/sql/row/fk.go similarity index 87% rename from pkg/sql/sqlbase/fk.go 
rename to pkg/sql/row/fk.go index 4703f0d7c5d9..39ff001aa2af 100644 --- a/pkg/sql/sqlbase/fk.go +++ b/pkg/sql/row/fk.go @@ -12,7 +12,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package sqlbase +package row import ( "context" @@ -26,9 +26,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" ) +// ID is an alias for sqlbase.ID. +type ID = sqlbase.ID + // TableLookupsByID maps table IDs to looked up descriptors or, for tables that // exist but are not yet public/leasable, entries with just the IsAdding flag. type TableLookupsByID map[ID]TableLookup @@ -38,9 +42,9 @@ type TableLookupsByID map[ID]TableLookup // flag. // This also includes an optional CheckHelper for the table. type TableLookup struct { - Table *TableDescriptor + Table *sqlbase.TableDescriptor IsAdding bool - CheckHelper *CheckHelper + CheckHelper *sqlbase.CheckHelper } // TableLookupFunction is the function type used by TablesNeededForFKs that will @@ -55,11 +59,11 @@ func NoLookup(_ context.Context, _ ID) (TableLookup, error) { // CheckPrivilegeFunction is the function type used by TablesNeededForFKs that will // check the privileges of the current user to access specific tables. -type CheckPrivilegeFunction func(context.Context, DescriptorProto, privilege.Kind) error +type CheckPrivilegeFunction func(context.Context, sqlbase.DescriptorProto, privilege.Kind) error // NoCheckPrivilege can be used to not perform any privilege checks during a // TablesNeededForFKs function call. 
-func NoCheckPrivilege(_ context.Context, _ DescriptorProto, _ privilege.Kind) error { +func NoCheckPrivilege(_ context.Context, _ sqlbase.DescriptorProto, _ privilege.Kind) error { return nil } @@ -86,15 +90,17 @@ type tableLookupQueue struct { tableLookups TableLookupsByID lookup TableLookupFunction checkPrivilege CheckPrivilegeFunction - analyzeExpr AnalyzeExprFunction + analyzeExpr sqlbase.AnalyzeExprFunction } -func (tl *TableLookup) addCheckHelper(ctx context.Context, analyzeExpr AnalyzeExprFunction) error { +func (tl *TableLookup) addCheckHelper( + ctx context.Context, analyzeExpr sqlbase.AnalyzeExprFunction, +) error { if analyzeExpr == nil { return nil } tableName := tree.MakeUnqualifiedTableName(tree.Name(tl.Table.Name)) - tl.CheckHelper = &CheckHelper{} + tl.CheckHelper = &sqlbase.CheckHelper{} return tl.CheckHelper.Init(ctx, analyzeExpr, &tableName, tl.Table) } @@ -176,11 +182,11 @@ func (q *tableLookupQueue) dequeue() (TableLookup, FKCheck, bool) { // CheckHelpers are required. func TablesNeededForFKs( ctx context.Context, - table TableDescriptor, + table sqlbase.TableDescriptor, usage FKCheck, lookup TableLookupFunction, checkPrivilege CheckPrivilegeFunction, - analyzeExpr AnalyzeExprFunction, + analyzeExpr sqlbase.AnalyzeExprFunction, ) (TableLookupsByID, error) { queue := tableLookupQueue{ tableLookups: make(TableLookupsByID), @@ -238,9 +244,9 @@ func TablesNeededForFKs( if curUsage == CheckDeletes { var nextUsage FKCheck switch referencedIdx.ForeignKey.OnDelete { - case ForeignKeyReference_CASCADE: + case sqlbase.ForeignKeyReference_CASCADE: nextUsage = CheckDeletes - case ForeignKeyReference_SET_DEFAULT, ForeignKeyReference_SET_NULL: + case sqlbase.ForeignKeyReference_SET_DEFAULT, sqlbase.ForeignKeyReference_SET_NULL: nextUsage = CheckUpdates default: // There is no need to check any other relationships. 
@@ -251,9 +257,9 @@ func TablesNeededForFKs( } } else { // curUsage == CheckUpdates - if referencedIdx.ForeignKey.OnUpdate == ForeignKeyReference_CASCADE || - referencedIdx.ForeignKey.OnUpdate == ForeignKeyReference_SET_DEFAULT || - referencedIdx.ForeignKey.OnUpdate == ForeignKeyReference_SET_NULL { + if referencedIdx.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_CASCADE || + referencedIdx.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_SET_DEFAULT || + referencedIdx.ForeignKey.OnUpdate == sqlbase.ForeignKeyReference_SET_NULL { if err := queue.enqueue( ctx, referencedTableLookup.Table.ID, CheckUpdates, ); err != nil { @@ -389,7 +395,7 @@ type fkInsertHelper struct { // each index. These slices will have at most one entry, since there can be // at most one outgoing foreign key per index. We use this data structure // instead of a one-to-one map for consistency with the other insert helpers. - fks map[IndexID][]baseFKHelper + fks map[sqlbase.IndexID][]baseFKHelper checker *fkBatchChecker } @@ -398,10 +404,10 @@ var errSkipUnusedFK = errors.New("no columns involved in FK included in writer") func makeFKInsertHelper( txn *client.Txn, - table TableDescriptor, + table sqlbase.TableDescriptor, otherTables TableLookupsByID, - colMap map[ColumnID]int, - alloc *DatumAlloc, + colMap map[sqlbase.ColumnID]int, + alloc *sqlbase.DatumAlloc, ) (fkInsertHelper, error) { h := fkInsertHelper{ checker: &fkBatchChecker{ @@ -418,7 +424,7 @@ func makeFKInsertHelper( return h, err } if h.fks == nil { - h.fks = make(map[IndexID][]baseFKHelper) + h.fks = make(map[sqlbase.IndexID][]baseFKHelper) } h.fks[idx.ID] = append(h.fks[idx.ID], fk) } @@ -451,8 +457,8 @@ func (h fkInsertHelper) CollectSpansForValues(values tree.Datums) (roachpb.Spans func checkIdx( ctx context.Context, checker *fkBatchChecker, - fks map[IndexID][]baseFKHelper, - idx IndexID, + fks map[sqlbase.IndexID][]baseFKHelper, + idx sqlbase.IndexID, row tree.Datums, ) error { for i, fk := range fks[idx] { @@ -478,19 
+484,19 @@ func checkIdx( } type fkDeleteHelper struct { - fks map[IndexID][]baseFKHelper + fks map[sqlbase.IndexID][]baseFKHelper otherTables TableLookupsByID - alloc *DatumAlloc + alloc *sqlbase.DatumAlloc checker *fkBatchChecker } func makeFKDeleteHelper( txn *client.Txn, - table TableDescriptor, + table sqlbase.TableDescriptor, otherTables TableLookupsByID, - colMap map[ColumnID]int, - alloc *DatumAlloc, + colMap map[sqlbase.ColumnID]int, + alloc *sqlbase.DatumAlloc, ) (fkDeleteHelper, error) { h := fkDeleteHelper{ otherTables: otherTables, @@ -514,7 +520,7 @@ func makeFKDeleteHelper( return fkDeleteHelper{}, err } if h.fks == nil { - h.fks = make(map[IndexID][]baseFKHelper) + h.fks = make(map[sqlbase.IndexID][]baseFKHelper) } h.fks[idx.ID] = append(h.fks[idx.ID], fk) } @@ -548,20 +554,20 @@ type fkUpdateHelper struct { inbound fkDeleteHelper // Check old values are not referenced. outbound fkInsertHelper // Check rows referenced by new values still exist. - indexIDsToCheck map[IndexID]struct{} // List of Index IDs to check + indexIDsToCheck map[sqlbase.IndexID]struct{} // List of Index IDs to check checker *fkBatchChecker } func makeFKUpdateHelper( txn *client.Txn, - table TableDescriptor, + table sqlbase.TableDescriptor, otherTables TableLookupsByID, - colMap map[ColumnID]int, - alloc *DatumAlloc, + colMap map[sqlbase.ColumnID]int, + alloc *sqlbase.DatumAlloc, ) (fkUpdateHelper, error) { ret := fkUpdateHelper{ - indexIDsToCheck: make(map[IndexID]struct{}), + indexIDsToCheck: make(map[sqlbase.IndexID]struct{}), } var err error if ret.inbound, err = makeFKDeleteHelper(txn, table, otherTables, colMap, alloc); err != nil { @@ -573,8 +579,10 @@ func makeFKUpdateHelper( return ret, err } -func (fks fkUpdateHelper) addCheckForIndex(indexID IndexID, descriptorType IndexDescriptor_Type) { - if descriptorType == IndexDescriptor_FORWARD { +func (fks fkUpdateHelper) addCheckForIndex( + indexID sqlbase.IndexID, descriptorType sqlbase.IndexDescriptor_Type, +) { + if 
descriptorType == sqlbase.IndexDescriptor_FORWARD { fks.indexIDsToCheck[indexID] = struct{}{} } } @@ -619,30 +627,30 @@ func (fks fkUpdateHelper) CollectSpansForValues(values tree.Datums) (roachpb.Spa type baseFKHelper struct { txn *client.Txn - rf RowFetcher - searchTable *TableDescriptor // the table being searched (for err msg) - searchIdx *IndexDescriptor // the index that must (not) contain a value + rf Fetcher + searchTable *sqlbase.TableDescriptor // the table being searched (for err msg) + searchIdx *sqlbase.IndexDescriptor // the index that must (not) contain a value prefixLen int - writeIdx IndexDescriptor // the index we want to modify - searchPrefix []byte // prefix of keys in searchIdx - ids map[ColumnID]int // col IDs - dir FKCheck // direction of check + writeIdx sqlbase.IndexDescriptor // the index we want to modify + searchPrefix []byte // prefix of keys in searchIdx + ids map[sqlbase.ColumnID]int // col IDs + dir FKCheck // direction of check } func makeBaseFKHelper( txn *client.Txn, otherTables TableLookupsByID, - writeIdx IndexDescriptor, - ref ForeignKeyReference, - colMap map[ColumnID]int, - alloc *DatumAlloc, + writeIdx sqlbase.IndexDescriptor, + ref sqlbase.ForeignKeyReference, + colMap map[sqlbase.ColumnID]int, + alloc *sqlbase.DatumAlloc, dir FKCheck, ) (baseFKHelper, error) { b := baseFKHelper{txn: txn, writeIdx: writeIdx, searchTable: otherTables[ref.Table].Table, dir: dir} if b.searchTable == nil { return b, errors.Errorf("referenced table %d not in provided table map %+v", ref.Table, otherTables) } - b.searchPrefix = MakeIndexKeyPrefix(b.searchTable, ref.Index) + b.searchPrefix = sqlbase.MakeIndexKeyPrefix(b.searchTable, ref.Index) searchIdx, err := b.searchTable.FindIndexByID(ref.Index) if err != nil { return b, err @@ -652,7 +660,7 @@ func makeBaseFKHelper( b.prefixLen = len(writeIdx.ColumnIDs) } b.searchIdx = searchIdx - tableArgs := RowFetcherTableArgs{ + tableArgs := FetcherTableArgs{ Desc: b.searchTable, Index: b.searchIdx, 
ColIdxMap: b.searchTable.ColumnIdxMap(), @@ -666,7 +674,7 @@ func makeBaseFKHelper( // Check for all NULL values, since these can skip FK checking in MATCH FULL // TODO(bram): add MATCH SIMPLE and fix MATCH FULL #30026 - b.ids = make(map[ColumnID]int, len(writeIdx.ColumnIDs)) + b.ids = make(map[sqlbase.ColumnID]int, len(writeIdx.ColumnIDs)) nulls := true var missingColumns []string for i, writeColID := range writeIdx.ColumnIDs[:b.prefixLen] { @@ -695,7 +703,7 @@ func makeBaseFKHelper( func (f baseFKHelper) spanForValues(values tree.Datums) (roachpb.Span, error) { var key roachpb.Key if values != nil { - keyBytes, _, err := EncodePartialIndexKey( + keyBytes, _, err := sqlbase.EncodePartialIndexKey( f.searchTable, f.searchIdx, f.prefixLen, f.ids, values, f.searchPrefix) if err != nil { return roachpb.Span{}, err @@ -722,7 +730,7 @@ var _ FkSpanCollector = fkInsertHelper{} var _ FkSpanCollector = fkDeleteHelper{} var _ FkSpanCollector = fkUpdateHelper{} -func collectSpansWithFKMap(fks map[IndexID][]baseFKHelper) roachpb.Spans { +func collectSpansWithFKMap(fks map[sqlbase.IndexID][]baseFKHelper) roachpb.Spans { var reads roachpb.Spans for idx := range fks { for _, fk := range fks[idx] { @@ -733,7 +741,7 @@ func collectSpansWithFKMap(fks map[IndexID][]baseFKHelper) roachpb.Spans { } func collectSpansForValuesWithFKMap( - fks map[IndexID][]baseFKHelper, values tree.Datums, + fks map[sqlbase.IndexID][]baseFKHelper, values tree.Datums, ) (roachpb.Spans, error) { var reads roachpb.Spans for idx := range fks { diff --git a/pkg/sql/sqlbase/fk_test.go b/pkg/sql/row/fk_test.go similarity index 93% rename from pkg/sql/sqlbase/fk_test.go rename to pkg/sql/row/fk_test.go index 693eb5c77baa..bd84b299eb4e 100644 --- a/pkg/sql/sqlbase/fk_test.go +++ b/pkg/sql/row/fk_test.go @@ -12,7 +12,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package sqlbase +package row import ( "bytes" @@ -26,21 +26,22 @@ import ( "github.com/pkg/errors" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/log" ) type testTables struct { nextID ID - tablesByID map[ID]*TableDescriptor - tablesByName map[string]*TableDescriptor + tablesByID map[ID]*sqlbase.TableDescriptor + tablesByName map[string]*sqlbase.TableDescriptor } func (t *testTables) createTestTable(name string) ID { - table := &TableDescriptor{ + table := &sqlbase.TableDescriptor{ Name: name, ID: t.nextID, - NextIndexID: IndexID(1), // This must be 1 to avoid clashing with a primary index. + NextIndexID: sqlbase.IndexID(1), // This must be 1 to avoid clashing with a primary index. } t.tablesByID[table.ID] = table t.tablesByName[table.Name] = table @@ -51,8 +52,8 @@ func (t *testTables) createTestTable(name string) ID { func (t *testTables) createForeignKeyReference( referencingID ID, referencedID ID, - onDelete ForeignKeyReference_Action, - onUpdate ForeignKeyReference_Action, + onDelete sqlbase.ForeignKeyReference_Action, + onUpdate sqlbase.ForeignKeyReference_Action, ) error { // Get the tables referencing, exists := t.tablesByID[referencingID] @@ -66,9 +67,9 @@ func (t *testTables) createForeignKeyReference( // Create an index on both tables. 
referencedIndexID := referenced.NextIndexID referencingIndexID := referencing.NextIndexID - referencedIndex := IndexDescriptor{ + referencedIndex := sqlbase.IndexDescriptor{ ID: referencedIndexID, - ReferencedBy: []ForeignKeyReference{ + ReferencedBy: []sqlbase.ForeignKeyReference{ { Table: referencingID, Index: referencingIndexID, @@ -77,9 +78,9 @@ func (t *testTables) createForeignKeyReference( } referenced.Indexes = append(referenced.Indexes, referencedIndex) - referencingIndex := IndexDescriptor{ + referencingIndex := sqlbase.IndexDescriptor{ ID: referencingIndexID, - ForeignKey: ForeignKeyReference{ + ForeignKey: sqlbase.ForeignKeyReference{ Table: referencedID, OnDelete: onDelete, OnUpdate: onUpdate, @@ -98,8 +99,8 @@ func (t *testTables) createForeignKeyReference( func TestTablesNeededForFKs(t *testing.T) { tables := testTables{ nextID: ID(1), - tablesByID: make(map[ID]*TableDescriptor), - tablesByName: make(map[string]*TableDescriptor), + tablesByID: make(map[ID]*sqlbase.TableDescriptor), + tablesByName: make(map[string]*sqlbase.TableDescriptor), } // First setup the table we will be testing against. @@ -111,19 +112,19 @@ func TestTablesNeededForFKs(t *testing.T) { // For all possible combinations of relationships for foreign keys, create a // table that X references, and one that references X. 
- for deleteNum, deleteName := range ForeignKeyReference_Action_name { - for updateNum, updateName := range ForeignKeyReference_Action_name { + for deleteNum, deleteName := range sqlbase.ForeignKeyReference_Action_name { + for updateNum, updateName := range sqlbase.ForeignKeyReference_Action_name { subName := fmt.Sprintf("OnDelete%s OnUpdate%s", deleteName, updateName) referencedByX := tables.createTestTable(fmt.Sprintf("X Referenced - %s", subName)) if err := tables.createForeignKeyReference( - xID, referencedByX, ForeignKeyReference_Action(deleteNum), ForeignKeyReference_Action(updateNum), + xID, referencedByX, sqlbase.ForeignKeyReference_Action(deleteNum), sqlbase.ForeignKeyReference_Action(updateNum), ); err != nil { t.Fatalf("could not add index: %s", err) } referencingX := tables.createTestTable(fmt.Sprintf("Referencing X - %s", subName)) if err := tables.createForeignKeyReference( - referencingX, xID, ForeignKeyReference_Action(deleteNum), ForeignKeyReference_Action(updateNum), + referencingX, xID, sqlbase.ForeignKeyReference_Action(deleteNum), sqlbase.ForeignKeyReference_Action(updateNum), ); err != nil { t.Fatalf("could not add index: %s", err) } @@ -136,29 +137,29 @@ func TestTablesNeededForFKs(t *testing.T) { // To go even further, create another set of tables for all possible // foreign key relationships that reference the table that is referencing // X. This will ensure that we bound the tree walking algorithm correctly. 
- for deleteNum2, deleteName2 := range ForeignKeyReference_Action_name { - for updateNum2, updateName2 := range ForeignKeyReference_Action_name { + for deleteNum2, deleteName2 := range sqlbase.ForeignKeyReference_Action_name { + for updateNum2, updateName2 := range sqlbase.ForeignKeyReference_Action_name { //if deleteNum2 != int32(ForeignKeyReference_CASCADE) || updateNum2 != int32(ForeignKeyReference_CASCADE) { // continue //} subName2 := fmt.Sprintf("Referencing %d - OnDelete%s OnUpdated%s", referencingX, deleteName2, updateName2) referencing2 := tables.createTestTable(subName2) if err := tables.createForeignKeyReference( - referencing2, referencingX, ForeignKeyReference_Action(deleteNum2), ForeignKeyReference_Action(updateNum2), + referencing2, referencingX, sqlbase.ForeignKeyReference_Action(deleteNum2), sqlbase.ForeignKeyReference_Action(updateNum2), ); err != nil { t.Fatalf("could not add index: %s", err) } // Only fetch the next level of tables if a cascade can occur through // the first level. 
- if deleteNum == int32(ForeignKeyReference_CASCADE) || - deleteNum == int32(ForeignKeyReference_SET_DEFAULT) || - deleteNum == int32(ForeignKeyReference_SET_NULL) { + if deleteNum == int32(sqlbase.ForeignKeyReference_CASCADE) || + deleteNum == int32(sqlbase.ForeignKeyReference_SET_DEFAULT) || + deleteNum == int32(sqlbase.ForeignKeyReference_SET_NULL) { expectedDeleteIDs = append(expectedDeleteIDs, referencing2) } - if updateNum == int32(ForeignKeyReference_CASCADE) || - updateNum == int32(ForeignKeyReference_SET_DEFAULT) || - updateNum == int32(ForeignKeyReference_SET_NULL) { + if updateNum == int32(sqlbase.ForeignKeyReference_CASCADE) || + updateNum == int32(sqlbase.ForeignKeyReference_SET_DEFAULT) || + updateNum == int32(sqlbase.ForeignKeyReference_SET_NULL) { expectedUpdateIDs = append(expectedUpdateIDs, referencing2) } } diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go new file mode 100644 index 000000000000..12b36f33bc59 --- /dev/null +++ b/pkg/sql/row/helper.go @@ -0,0 +1,138 @@ +// Copyright 2018 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package row + +import ( + "sort" + + "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" + "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/pkg/errors" +) + +// rowHelper has the common methods for table row manipulations. 
+type rowHelper struct { + TableDesc *sqlbase.TableDescriptor + // Secondary indexes. + Indexes []sqlbase.IndexDescriptor + indexEntries []sqlbase.IndexEntry + + // Computed during initialization for pretty-printing. + primIndexValDirs []encoding.Direction + secIndexValDirs [][]encoding.Direction + + // Computed and cached. + primaryIndexKeyPrefix []byte + primaryIndexCols map[sqlbase.ColumnID]struct{} + sortedColumnFamilies map[sqlbase.FamilyID][]sqlbase.ColumnID +} + +func newRowHelper(desc *sqlbase.TableDescriptor, indexes []sqlbase.IndexDescriptor) rowHelper { + rh := rowHelper{TableDesc: desc, Indexes: indexes} + + // Pre-compute the encoding directions of the index key values for + // pretty-printing in traces. + rh.primIndexValDirs = sqlbase.IndexKeyValDirs(&rh.TableDesc.PrimaryIndex) + + rh.secIndexValDirs = make([][]encoding.Direction, len(rh.Indexes)) + for i, index := range rh.Indexes { + rh.secIndexValDirs[i] = sqlbase.IndexKeyValDirs(&index) + } + + return rh +} + +// encodeIndexes encodes the primary and secondary index keys. The +// secondaryIndexEntries are only valid until the next call to encodeIndexes or +// encodeSecondaryIndexes. +func (rh *rowHelper) encodeIndexes( + colIDtoRowIndex map[sqlbase.ColumnID]int, values []tree.Datum, +) (primaryIndexKey []byte, secondaryIndexEntries []sqlbase.IndexEntry, err error) { + if rh.primaryIndexKeyPrefix == nil { + rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.TableDesc, + rh.TableDesc.PrimaryIndex.ID) + } + primaryIndexKey, _, err = sqlbase.EncodeIndexKey( + rh.TableDesc, &rh.TableDesc.PrimaryIndex, colIDtoRowIndex, values, rh.primaryIndexKeyPrefix) + if err != nil { + return nil, nil, err + } + secondaryIndexEntries, err = rh.encodeSecondaryIndexes(colIDtoRowIndex, values) + if err != nil { + return nil, nil, err + } + return primaryIndexKey, secondaryIndexEntries, nil +} + +// encodeSecondaryIndexes encodes the secondary index keys. 
The +// secondaryIndexEntries are only valid until the next call to encodeIndexes or +// encodeSecondaryIndexes. +func (rh *rowHelper) encodeSecondaryIndexes( + colIDtoRowIndex map[sqlbase.ColumnID]int, values []tree.Datum, +) (secondaryIndexEntries []sqlbase.IndexEntry, err error) { + if len(rh.indexEntries) != len(rh.Indexes) { + rh.indexEntries = make([]sqlbase.IndexEntry, len(rh.Indexes)) + } + rh.indexEntries, err = sqlbase.EncodeSecondaryIndexes( + rh.TableDesc, rh.Indexes, colIDtoRowIndex, values, rh.indexEntries) + if err != nil { + return nil, err + } + return rh.indexEntries, nil +} + +// skipColumnInPK returns true if the value at column colID does not need +// to be encoded because it is already part of the primary key. Composite +// datums are considered too, so a composite datum in a PK will return false. +// TODO(dan): This logic is common and being moved into TableDescriptor (see +// #6233). Once it is, use the shared one. +func (rh *rowHelper) skipColumnInPK( + colID sqlbase.ColumnID, family sqlbase.FamilyID, value tree.Datum, +) (bool, error) { + if rh.primaryIndexCols == nil { + rh.primaryIndexCols = make(map[sqlbase.ColumnID]struct{}) + for _, colID := range rh.TableDesc.PrimaryIndex.ColumnIDs { + rh.primaryIndexCols[colID] = struct{}{} + } + } + if _, ok := rh.primaryIndexCols[colID]; !ok { + return false, nil + } + if family != 0 { + return false, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family) + } + if cdatum, ok := value.(tree.CompositeDatum); ok { + // Composite columns are encoded in both the key and the value. + return !cdatum.IsComposite(), nil + } + // Skip primary key columns as their values are encoded in the key of + // each family. Family 0 is guaranteed to exist and acts as a + // sentinel. 
+ return true, nil +} + +func (rh *rowHelper) sortedColumnFamily(famID sqlbase.FamilyID) ([]sqlbase.ColumnID, bool) { + if rh.sortedColumnFamilies == nil { + rh.sortedColumnFamilies = make(map[sqlbase.FamilyID][]sqlbase.ColumnID, len(rh.TableDesc.Families)) + for _, family := range rh.TableDesc.Families { + colIDs := append([]sqlbase.ColumnID(nil), family.ColumnIDs...) + sort.Sort(sqlbase.ColumnIDs(colIDs)) + rh.sortedColumnFamilies[family.ID] = colIDs + } + } + colIDs, ok := rh.sortedColumnFamilies[famID] + return colIDs, ok +} diff --git a/pkg/sql/sqlbase/kvfetcher.go b/pkg/sql/row/kvfetcher.go similarity index 75% rename from pkg/sql/sqlbase/kvfetcher.go rename to pkg/sql/row/kvfetcher.go index eff9aafd2b36..712bb1523f62 100644 --- a/pkg/sql/sqlbase/kvfetcher.go +++ b/pkg/sql/row/kvfetcher.go @@ -12,121 +12,21 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package sqlbase +package row import ( "bytes" "context" - "fmt" - "strings" "github.com/pkg/errors" "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" - "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/log" ) -// IndexKeyValDirs returns the corresponding encoding.Directions for all the -// encoded values in index's "fullest" possible index key, including directions -// for table/index IDs, the interleaved sentinel and the index column values. 
-// For example, given -// CREATE INDEX foo ON bar (a, b DESC) INTERLEAVED IN PARENT bar (a) -// a typical index key with all values specified could be -// /51/1/42/#/51/2/1337 -// which would return the slice -// {ASC, ASC, ASC, 0, ASC, ASC, DESC} -func IndexKeyValDirs(index *IndexDescriptor) []encoding.Direction { - if index == nil { - return nil - } - - dirs := make([]encoding.Direction, 0, (len(index.Interleave.Ancestors)+1)*2+len(index.ColumnDirections)) - - colIdx := 0 - for _, ancs := range index.Interleave.Ancestors { - // Table/Index IDs are always encoded ascending. - dirs = append(dirs, encoding.Ascending, encoding.Ascending) - for i := 0; i < int(ancs.SharedPrefixLen); i++ { - d, err := index.ColumnDirections[colIdx].ToEncodingDirection() - if err != nil { - panic(err) - } - dirs = append(dirs, d) - colIdx++ - } - - // The interleaved sentinel uses the 0 value for - // encoding.Direction when pretty-printing (see - // encoding.go:prettyPrintFirstValue). - dirs = append(dirs, 0) - } - - // The index's table/index ID. - dirs = append(dirs, encoding.Ascending, encoding.Ascending) - - for colIdx < len(index.ColumnDirections) { - d, err := index.ColumnDirections[colIdx].ToEncodingDirection() - if err != nil { - panic(err) - } - dirs = append(dirs, d) - colIdx++ - } - - return dirs -} - -// PrettyKey pretty-prints the specified key, skipping over the first `skip` -// fields. The pretty printed key looks like: -// -// /Table///... -// -// We always strip off the /Table prefix and then `skip` more fields. Note that -// this assumes that the fields themselves do not contain '/', but that is -// currently true for the fields we care about stripping (the table and index -// ID). 
-func PrettyKey(valDirs []encoding.Direction, key roachpb.Key, skip int) string { - p := key.StringWithDirs(valDirs) - for i := 0; i <= skip; i++ { - n := strings.IndexByte(p[1:], '/') - if n == -1 { - return "" - } - p = p[n+1:] - } - return p -} - -// PrettySpan returns a human-readable representation of a span. -func PrettySpan(valDirs []encoding.Direction, span roachpb.Span, skip int) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s-%s", PrettyKey(valDirs, span.Key, skip), PrettyKey(valDirs, span.EndKey, skip)) - return buf.String() -} - -// PrettySpans returns a human-readable description of the spans. -// If index is nil, then pretty print subroutines will use their default -// settings. -func PrettySpans(index *IndexDescriptor, spans []roachpb.Span, skip int) string { - if len(spans) == 0 { - return "" - } - - valDirs := IndexKeyValDirs(index) - - var buf bytes.Buffer - for i, span := range spans { - if i > 0 { - buf.WriteString(" ") - } - buf.WriteString(PrettySpan(valDirs, span, skip)) - } - return buf.String() -} - // kvBatchSize is the number of keys we request at a time. // On a single node, 1000 was enough to avoid any performance degradation. On // multi-node clusters, we want bigger chunks to make up for the higher latency. @@ -345,7 +245,7 @@ func (f *txnKVFetcher) fetch(ctx context.Context) error { return errors.Errorf( "span with results after resume span; it shouldn't happen given that "+ "we're only scanning non-overlapping spans. New spans: %s", - PrettySpans(nil, f.spans, 0 /* skip */)) + sqlbase.PrettySpans(nil, f.spans, 0 /* skip */)) } if resumeSpan := header.ResumeSpan; resumeSpan != nil { diff --git a/pkg/sql/row/main_test.go b/pkg/sql/row/main_test.go new file mode 100644 index 000000000000..d28779e71cfb --- /dev/null +++ b/pkg/sql/row/main_test.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Cockroach Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package row_test + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/security/securitytest" + "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/util/randutil" +) + +func TestMain(m *testing.M) { + security.SetAssetLoader(securitytest.EmbeddedAssets) + randutil.SeedForTests() + serverutils.InitTestServerFactory(server.TestServerFactory) + os.Exit(m.Run()) +} diff --git a/pkg/sql/sqlbase/rowwriter.go b/pkg/sql/row/writer.go similarity index 75% rename from pkg/sql/sqlbase/rowwriter.go rename to pkg/sql/row/writer.go index 58e0dff16cc5..70cf0518d7eb 100644 --- a/pkg/sql/sqlbase/rowwriter.go +++ b/pkg/sql/row/writer.go @@ -12,13 +12,12 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package sqlbase +package row import ( "bytes" "context" "fmt" - "sort" "github.com/pkg/errors" @@ -27,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util/encoding" + "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -40,125 +39,11 @@ const ( SkipFKs checkFKConstraints = false ) -// rowHelper has the common methods for table row manipulations. -type rowHelper struct { - TableDesc *TableDescriptor - // Secondary indexes. - Indexes []IndexDescriptor - indexEntries []IndexEntry - - // Computed during initialization for pretty-printing. - primIndexValDirs []encoding.Direction - secIndexValDirs [][]encoding.Direction - - // Computed and cached. - primaryIndexKeyPrefix []byte - primaryIndexCols map[ColumnID]struct{} - sortedColumnFamilies map[FamilyID][]ColumnID -} - -func newRowHelper(desc *TableDescriptor, indexes []IndexDescriptor) rowHelper { - rh := rowHelper{TableDesc: desc, Indexes: indexes} - - // Pre-compute the encoding directions of the index key values for - // pretty-printing in traces. - rh.primIndexValDirs = IndexKeyValDirs(&rh.TableDesc.PrimaryIndex) - - rh.secIndexValDirs = make([][]encoding.Direction, len(rh.Indexes)) - for i, index := range rh.Indexes { - rh.secIndexValDirs[i] = IndexKeyValDirs(&index) - } - - return rh -} - -// encodeIndexes encodes the primary and secondary index keys. The -// secondaryIndexEntries are only valid until the next call to encodeIndexes or -// encodeSecondaryIndexes. 
-func (rh *rowHelper) encodeIndexes( - colIDtoRowIndex map[ColumnID]int, values []tree.Datum, -) (primaryIndexKey []byte, secondaryIndexEntries []IndexEntry, err error) { - if rh.primaryIndexKeyPrefix == nil { - rh.primaryIndexKeyPrefix = MakeIndexKeyPrefix(rh.TableDesc, - rh.TableDesc.PrimaryIndex.ID) - } - primaryIndexKey, _, err = EncodeIndexKey( - rh.TableDesc, &rh.TableDesc.PrimaryIndex, colIDtoRowIndex, values, rh.primaryIndexKeyPrefix) - if err != nil { - return nil, nil, err - } - secondaryIndexEntries, err = rh.encodeSecondaryIndexes(colIDtoRowIndex, values) - if err != nil { - return nil, nil, err - } - return primaryIndexKey, secondaryIndexEntries, nil -} - -// encodeSecondaryIndexes encodes the secondary index keys. The -// secondaryIndexEntries are only valid until the next call to encodeIndexes or -// encodeSecondaryIndexes. -func (rh *rowHelper) encodeSecondaryIndexes( - colIDtoRowIndex map[ColumnID]int, values []tree.Datum, -) (secondaryIndexEntries []IndexEntry, err error) { - if len(rh.indexEntries) != len(rh.Indexes) { - rh.indexEntries = make([]IndexEntry, len(rh.Indexes)) - } - rh.indexEntries, err = EncodeSecondaryIndexes( - rh.TableDesc, rh.Indexes, colIDtoRowIndex, values, rh.indexEntries) - if err != nil { - return nil, err - } - return rh.indexEntries, nil -} - -// skipColumnInPK returns true if the value at column colID does not need -// to be encoded because it is already part of the primary key. Composite -// datums are considered too, so a composite datum in a PK will return false. -// TODO(dan): This logic is common and being moved into TableDescriptor (see -// #6233). Once it is, use the shared one. 
-func (rh *rowHelper) skipColumnInPK(
- colID ColumnID, family FamilyID, value tree.Datum,
-) (bool, error) {
- if rh.primaryIndexCols == nil {
- rh.primaryIndexCols = make(map[ColumnID]struct{})
- for _, colID := range rh.TableDesc.PrimaryIndex.ColumnIDs {
- rh.primaryIndexCols[colID] = struct{}{}
- }
- }
- if _, ok := rh.primaryIndexCols[colID]; !ok {
- return false, nil
- }
- if family != 0 {
- return false, errors.Errorf("primary index column %d must be in family 0, was %d", colID, family)
- }
- if cdatum, ok := value.(tree.CompositeDatum); ok {
- // Composite columns are encoded in both the key and the value.
- return !cdatum.IsComposite(), nil
- }
- // Skip primary key columns as their values are encoded in the key of
- // each family. Family 0 is guaranteed to exist and acts as a
- // sentinel.
- return true, nil
-}
-
-func (rh *rowHelper) sortedColumnFamily(famID FamilyID) ([]ColumnID, bool) {
- if rh.sortedColumnFamilies == nil {
- rh.sortedColumnFamilies = make(map[FamilyID][]ColumnID, len(rh.TableDesc.Families))
- for _, family := range rh.TableDesc.Families {
- colIDs := append([]ColumnID(nil), family.ColumnIDs...)
- sort.Sort(ColumnIDs(colIDs))
- rh.sortedColumnFamilies[family.ID] = colIDs
- }
- }
- colIDs, ok := rh.sortedColumnFamilies[famID]
- return colIDs, ok
-}
-
-// RowInserter abstracts the key/value operations for inserting table rows.
-type RowInserter struct {
+// Inserter abstracts the key/value operations for inserting table rows.
+type Inserter struct {
 Helper rowHelper
- InsertCols []ColumnDescriptor
- InsertColIDtoRowIndex map[ColumnID]int
+ InsertCols []sqlbase.ColumnDescriptor
+ InsertColIDtoRowIndex map[sqlbase.ColumnID]int
 Fks fkInsertHelper

 // For allocation avoidance.
@@ -168,25 +53,25 @@ type RowInserter struct {
 value roachpb.Value
 }

-// MakeRowInserter creates a RowInserter for the given table.
+// MakeInserter creates an Inserter for the given table.
 //
 // insertCols must contain every column in the primary key.
-func MakeRowInserter( +func MakeInserter( txn *client.Txn, - tableDesc *TableDescriptor, + tableDesc *sqlbase.TableDescriptor, fkTables TableLookupsByID, - insertCols []ColumnDescriptor, + insertCols []sqlbase.ColumnDescriptor, checkFKs checkFKConstraints, - alloc *DatumAlloc, -) (RowInserter, error) { + alloc *sqlbase.DatumAlloc, +) (Inserter, error) { indexes := tableDesc.Indexes // Also include the secondary indexes in mutation state // DELETE_AND_WRITE_ONLY. if len(tableDesc.Mutations) > 0 { - indexes = make([]IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) + indexes = make([]sqlbase.IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) indexes = append(indexes, tableDesc.Indexes...) for _, m := range tableDesc.Mutations { - if m.State == DescriptorMutation_DELETE_AND_WRITE_ONLY { + if m.State == sqlbase.DescriptorMutation_DELETE_AND_WRITE_ONLY { if index := m.GetIndex(); index != nil { indexes = append(indexes, *index) } @@ -194,7 +79,7 @@ func MakeRowInserter( } } - ri := RowInserter{ + ri := Inserter{ Helper: newRowHelper(tableDesc, indexes), InsertCols: insertCols, InsertColIDtoRowIndex: ColIDtoRowIndexFromCols(insertCols), @@ -203,7 +88,7 @@ func MakeRowInserter( for i, col := range tableDesc.PrimaryIndex.ColumnIDs { if _, ok := ri.InsertColIDtoRowIndex[col]; !ok { - return RowInserter{}, fmt.Errorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i]) + return Inserter{}, fmt.Errorf("missing %q primary key column", tableDesc.PrimaryIndex.ColumnNames[i]) } } @@ -267,7 +152,7 @@ type putter interface { // InsertRow adds to the batch the kv operations necessary to insert a table row // with the given values. -func (ri *RowInserter) InsertRow( +func (ri *Inserter) InsertRow( ctx context.Context, b putter, values []tree.Datum, @@ -290,7 +175,7 @@ func (ri *RowInserter) InsertRow( for i, val := range values { // Make sure the value can be written to the column before proceeding. 
var err error - if ri.marshaled[i], err = MarshalColumnValue(ri.InsertCols[i], val); err != nil { + if ri.marshaled[i], err = sqlbase.MarshalColumnValue(ri.InsertCols[i], val); err != nil { return err } } @@ -356,11 +241,11 @@ func prepareInsertOrUpdateBatch( batch putter, helper *rowHelper, primaryIndexKey []byte, - updatedCols []ColumnDescriptor, + updatedCols []sqlbase.ColumnDescriptor, values []tree.Datum, - valColIDMapping map[ColumnID]int, + valColIDMapping map[sqlbase.ColumnID]int, marshaledValues []roachpb.Value, - marshaledColIDMapping map[ColumnID]int, + marshaledColIDMapping map[sqlbase.ColumnID]int, kvKey *roachpb.Key, kvValue *roachpb.Value, rawValueBuf []byte, @@ -414,7 +299,7 @@ func prepareInsertOrUpdateBatch( rawValueBuf = rawValueBuf[:0] - var lastColID ColumnID + var lastColID sqlbase.ColumnID familySortedColumnIDs, ok := helper.sortedColumnFamily(family.ID) if !ok { return nil, pgerror.NewAssertionErrorf("invalid family sorted column id map") @@ -440,7 +325,7 @@ func prepareInsertOrUpdateBatch( colIDDiff := col.ID - lastColID lastColID = col.ID var err error - rawValueBuf, err = EncodeTableValue(rawValueBuf, colIDDiff, values[idx], nil) + rawValueBuf, err = sqlbase.EncodeTableValue(rawValueBuf, colIDDiff, values[idx], nil) if err != nil { return nil, err } @@ -466,27 +351,27 @@ func prepareInsertOrUpdateBatch( // EncodeIndexesForRow encodes the provided values into their primary and // secondary index keys. The secondaryIndexEntries are only valid until the next // call to EncodeIndexesForRow. -func (ri *RowInserter) EncodeIndexesForRow( +func (ri *Inserter) EncodeIndexesForRow( values []tree.Datum, -) (primaryIndexKey []byte, secondaryIndexEntries []IndexEntry, err error) { +) (primaryIndexKey []byte, secondaryIndexEntries []sqlbase.IndexEntry, err error) { return ri.Helper.encodeIndexes(ri.InsertColIDtoRowIndex, values) } -// RowUpdater abstracts the key/value operations for updating table rows. 
-type RowUpdater struct {
+// Updater abstracts the key/value operations for updating table rows.
+type Updater struct {
 Helper rowHelper
 DeleteHelper *rowHelper
- FetchCols []ColumnDescriptor
- FetchColIDtoRowIndex map[ColumnID]int
- UpdateCols []ColumnDescriptor
- updateColIDtoRowIndex map[ColumnID]int
+ FetchCols []sqlbase.ColumnDescriptor
+ FetchColIDtoRowIndex map[sqlbase.ColumnID]int
+ UpdateCols []sqlbase.ColumnDescriptor
+ updateColIDtoRowIndex map[sqlbase.ColumnID]int
 primaryKeyColChange bool

- // rd and ri are used when the update this RowUpdater is created for modifies
+ // rd and ri are used when the update this Updater is created for modifies
 // the primary key of the table. In that case, rows must be deleted and
 // re-added instead of merely updated, since the keys are changing.
- rd RowDeleter
- ri RowInserter
+ rd Deleter
+ ri Inserter
 Fks fkUpdateHelper
 cascader *cascader

@@ -495,7 +380,7 @@ type RowUpdater struct {
 marshaled []roachpb.Value
 newValues []tree.Datum
 key roachpb.Key
- indexEntriesBuf []IndexEntry
+ indexEntriesBuf []sqlbase.IndexEntry
 valueBuf []byte
 value roachpb.Value
 }

@@ -503,61 +388,67 @@ type RowUpdater struct {
 type rowUpdaterType int

 const (
- // RowUpdaterDefault indicates that a RowUpdater should update everything
+ // UpdaterDefault indicates that an Updater should update everything
 // about a row, including secondary indexes.
- RowUpdaterDefault rowUpdaterType = 0
- // RowUpdaterOnlyColumns indicates that a RowUpdater should only update the
+ UpdaterDefault rowUpdaterType = 0
+ // UpdaterOnlyColumns indicates that an Updater should only update the
 // columns of a row.
- RowUpdaterOnlyColumns rowUpdaterType = 1
+ UpdaterOnlyColumns rowUpdaterType = 1
 )

-// MakeRowUpdater creates a RowUpdater for the given table.
+// MakeUpdater creates an Updater for the given table.
 //
 // UpdateCols are the columns being updated and correspond to the updateValues
 // that will be passed to UpdateRow.
// -// The returned RowUpdater contains a FetchCols field that defines the +// The returned Updater contains a FetchCols field that defines the // expectation of which values are passed as oldValues to UpdateRow. All the columns // passed in requestedCols will be included in FetchCols at the beginning. -func MakeRowUpdater( +func MakeUpdater( txn *client.Txn, - tableDesc *TableDescriptor, + tableDesc *sqlbase.TableDescriptor, fkTables TableLookupsByID, - updateCols []ColumnDescriptor, - requestedCols []ColumnDescriptor, + updateCols []sqlbase.ColumnDescriptor, + requestedCols []sqlbase.ColumnDescriptor, updateType rowUpdaterType, evalCtx *tree.EvalContext, - alloc *DatumAlloc, -) (RowUpdater, error) { - rowUpdater, err := makeRowUpdaterWithoutCascader( + alloc *sqlbase.DatumAlloc, +) (Updater, error) { + rowUpdater, err := makeUpdaterWithoutCascader( txn, tableDesc, fkTables, updateCols, requestedCols, updateType, alloc, ) if err != nil { - return RowUpdater{}, err + return Updater{}, err } rowUpdater.cascader, err = makeUpdateCascader( txn, tableDesc, fkTables, updateCols, evalCtx, alloc, ) if err != nil { - return RowUpdater{}, err + return Updater{}, err } return rowUpdater, nil } -// makeRowUpdaterWithoutCascader is the same function as MakeRowUpdated but does not +type returnTrue struct{} + +func (returnTrue) Error() string { panic("unimplemented") } + +var returnTruePseudoError error = returnTrue{} + +// makeUpdaterWithoutCascader is the same function as MakeUpdater but does not // create a cascader. 
-func makeRowUpdaterWithoutCascader( +func makeUpdaterWithoutCascader( txn *client.Txn, - tableDesc *TableDescriptor, + tableDesc *sqlbase.TableDescriptor, fkTables TableLookupsByID, - updateCols []ColumnDescriptor, - requestedCols []ColumnDescriptor, + updateCols []sqlbase.ColumnDescriptor, + requestedCols []sqlbase.ColumnDescriptor, updateType rowUpdaterType, - alloc *DatumAlloc, -) (RowUpdater, error) { + alloc *sqlbase.DatumAlloc, +) (Updater, error) { updateColIDtoRowIndex := ColIDtoRowIndexFromCols(updateCols) - primaryIndexCols := make(map[ColumnID]struct{}, len(tableDesc.PrimaryIndex.ColumnIDs)) + primaryIndexCols := make(map[sqlbase.ColumnID]struct{}, len(tableDesc.PrimaryIndex.ColumnIDs)) for _, colID := range tableDesc.PrimaryIndex.ColumnIDs { primaryIndexCols[colID] = struct{}{} } @@ -571,8 +462,8 @@ func makeRowUpdaterWithoutCascader( } // Secondary indexes needing updating. - needsUpdate := func(index IndexDescriptor) bool { - if updateType == RowUpdaterOnlyColumns { + needsUpdate := func(index sqlbase.IndexDescriptor) bool { + if updateType == UpdaterOnlyColumns { // Only update columns. 
return false } @@ -580,7 +471,7 @@ func makeRowUpdaterWithoutCascader( if primaryKeyColChange { return true } - return index.RunOverAllColumns(func(id ColumnID) error { + return index.RunOverAllColumns(func(id sqlbase.ColumnID) error { if _, ok := updateColIDtoRowIndex[id]; ok { return returnTruePseudoError } @@ -588,7 +479,7 @@ func makeRowUpdaterWithoutCascader( }) != nil } - indexes := make([]IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) + indexes := make([]sqlbase.IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) for _, index := range tableDesc.Indexes { if needsUpdate(index) { indexes = append(indexes, index) @@ -598,19 +489,19 @@ func makeRowUpdaterWithoutCascader( // Columns of the table to update, including those in delete/write-only state tableCols := tableDesc.Columns if len(tableDesc.Mutations) > 0 { - tableCols = make([]ColumnDescriptor, 0, len(tableDesc.Columns)+len(tableDesc.Mutations)) + tableCols = make([]sqlbase.ColumnDescriptor, 0, len(tableDesc.Columns)+len(tableDesc.Mutations)) tableCols = append(tableCols, tableDesc.Columns...) } - var deleteOnlyIndexes []IndexDescriptor + var deleteOnlyIndexes []sqlbase.IndexDescriptor for _, m := range tableDesc.Mutations { if index := m.GetIndex(); index != nil { if needsUpdate(*index) { switch m.State { - case DescriptorMutation_DELETE_ONLY: + case sqlbase.DescriptorMutation_DELETE_ONLY: if deleteOnlyIndexes == nil { // Allocate at most once. 
- deleteOnlyIndexes = make([]IndexDescriptor, 0, len(tableDesc.Mutations)) + deleteOnlyIndexes = make([]sqlbase.IndexDescriptor, 0, len(tableDesc.Mutations)) } deleteOnlyIndexes = append(deleteOnlyIndexes, *index) default: @@ -628,7 +519,7 @@ func makeRowUpdaterWithoutCascader( deleteOnlyHelper = &rh } - ru := RowUpdater{ + ru := Updater{ Helper: newRowHelper(tableDesc, indexes), DeleteHelper: deleteOnlyHelper, UpdateCols: updateCols, @@ -646,19 +537,19 @@ func makeRowUpdaterWithoutCascader( if ru.rd, err = makeRowDeleterWithoutCascader( txn, tableDesc, fkTables, tableCols, SkipFKs, alloc, ); err != nil { - return RowUpdater{}, err + return Updater{}, err } ru.FetchCols = ru.rd.FetchCols ru.FetchColIDtoRowIndex = ColIDtoRowIndexFromCols(ru.FetchCols) - if ru.ri, err = MakeRowInserter(txn, tableDesc, fkTables, + if ru.ri, err = MakeInserter(txn, tableDesc, fkTables, tableCols, SkipFKs, alloc); err != nil { - return RowUpdater{}, err + return Updater{}, err } } else { ru.FetchCols = requestedCols[:len(requestedCols):len(requestedCols)] ru.FetchColIDtoRowIndex = ColIDtoRowIndexFromCols(ru.FetchCols) - maybeAddCol := func(colID ColumnID) error { + maybeAddCol := func(colID sqlbase.ColumnID) error { if _, ok := ru.FetchColIDtoRowIndex[colID]; !ok { col, err := tableDesc.FindColumnByID(colID) if err != nil { @@ -671,7 +562,7 @@ func makeRowUpdaterWithoutCascader( } for _, colID := range tableDesc.PrimaryIndex.ColumnIDs { if err := maybeAddCol(colID); err != nil { - return RowUpdater{}, err + return Updater{}, err } } for _, fam := range tableDesc.Families { @@ -685,19 +576,19 @@ func makeRowUpdaterWithoutCascader( if familyBeingUpdated { for _, colID := range fam.ColumnIDs { if err := maybeAddCol(colID); err != nil { - return RowUpdater{}, err + return Updater{}, err } } } } for _, index := range indexes { if err := index.RunOverAllColumns(maybeAddCol); err != nil { - return RowUpdater{}, err + return Updater{}, err } } for _, index := range deleteOnlyIndexes { if err := 
index.RunOverAllColumns(maybeAddCol); err != nil { - return RowUpdater{}, err + return Updater{}, err } } } @@ -705,7 +596,7 @@ func makeRowUpdaterWithoutCascader( var err error if ru.Fks, err = makeFKUpdateHelper(txn, *tableDesc, fkTables, ru.FetchColIDtoRowIndex, alloc); err != nil { - return RowUpdater{}, err + return Updater{}, err } return ru, nil } @@ -717,7 +608,7 @@ func makeRowUpdaterWithoutCascader( // Note that updateValues only contains the ones that are changing. // // The return value is only good until the next call to UpdateRow. -func (ru *RowUpdater) UpdateRow( +func (ru *Updater) UpdateRow( ctx context.Context, b *client.Batch, oldValues []tree.Datum, @@ -741,7 +632,7 @@ func (ru *RowUpdater) UpdateRow( if err != nil { return nil, err } - var deleteOldSecondaryIndexEntries []IndexEntry + var deleteOldSecondaryIndexEntries []sqlbase.IndexEntry if ru.DeleteHelper != nil { _, deleteOldSecondaryIndexEntries, err = ru.DeleteHelper.encodeIndexes(ru.FetchColIDtoRowIndex, oldValues) if err != nil { @@ -758,7 +649,7 @@ func (ru *RowUpdater) UpdateRow( // happen before index encoding because certain datum types (i.e. tuple) // cannot be used as index values. for i, val := range updateValues { - if ru.marshaled[i], err = MarshalColumnValue(ru.UpdateCols[i], val); err != nil { + if ru.marshaled[i], err = sqlbase.MarshalColumnValue(ru.UpdateCols[i], val); err != nil { return nil, err } } @@ -770,7 +661,7 @@ func (ru *RowUpdater) UpdateRow( } rowPrimaryKeyChanged := false - var newSecondaryIndexEntries []IndexEntry + var newSecondaryIndexEntries []sqlbase.IndexEntry if ru.primaryKeyColChange { var newPrimaryIndexKey []byte newPrimaryIndexKey, newSecondaryIndexEntries, err = @@ -855,7 +746,7 @@ func (ru *RowUpdater) UpdateRow( // We're skipping inverted indexes in this loop, but appending the inverted index entry to the back of // newSecondaryIndexEntries to process later. For inverted indexes we need to remove all old entries before adding // new ones. 
- if index.Type == IndexDescriptor_INVERTED { + if index.Type == sqlbase.IndexDescriptor_INVERTED { newSecondaryIndexEntries = append(newSecondaryIndexEntries, newSecondaryIndexEntry) oldSecondaryIndexEntries = append(oldSecondaryIndexEntries, oldSecondaryIndexEntry) @@ -937,54 +828,54 @@ func (ru *RowUpdater) UpdateRow( return ru.newValues, nil } -// IsColumnOnlyUpdate returns true if this RowUpdater is only updating column +// IsColumnOnlyUpdate returns true if this Updater is only updating column // data (in contrast to updating the primary key or other indexes). -func (ru *RowUpdater) IsColumnOnlyUpdate() bool { +func (ru *Updater) IsColumnOnlyUpdate() bool { // TODO(dan): This is used in the schema change backfill to assert that it was // configured correctly and will not be doing things it shouldn't. This is an // unfortunate bleeding of responsibility and indicates the abstraction could - // be improved. Specifically, RowUpdater currently has two responsibilities + // be improved. Specifically, Updater currently has two responsibilities // (computing which indexes need to be updated and mapping sql rows to k/v // operations) and these should be split. return !ru.primaryKeyColChange && ru.DeleteHelper == nil && len(ru.Helper.Indexes) == 0 } -// RowDeleter abstracts the key/value operations for deleting table rows. -type RowDeleter struct { +// Deleter abstracts the key/value operations for deleting table rows. +type Deleter struct { Helper rowHelper - FetchCols []ColumnDescriptor - FetchColIDtoRowIndex map[ColumnID]int + FetchCols []sqlbase.ColumnDescriptor + FetchColIDtoRowIndex map[sqlbase.ColumnID]int Fks fkDeleteHelper cascader *cascader // For allocation avoidance. key roachpb.Key } -// MakeRowDeleter creates a RowDeleter for the given table. +// MakeDeleter creates a Deleter for the given table. 
// -// The returned RowDeleter contains a FetchCols field that defines the +// The returned Deleter contains a FetchCols field that defines the // expectation of which values are passed as values to DeleteRow. Any column // passed in requestedCols will be included in FetchCols. -func MakeRowDeleter( +func MakeDeleter( txn *client.Txn, - tableDesc *TableDescriptor, + tableDesc *sqlbase.TableDescriptor, fkTables TableLookupsByID, - requestedCols []ColumnDescriptor, + requestedCols []sqlbase.ColumnDescriptor, checkFKs checkFKConstraints, evalCtx *tree.EvalContext, - alloc *DatumAlloc, -) (RowDeleter, error) { + alloc *sqlbase.DatumAlloc, +) (Deleter, error) { rowDeleter, err := makeRowDeleterWithoutCascader( txn, tableDesc, fkTables, requestedCols, checkFKs, alloc, ) if err != nil { - return RowDeleter{}, err + return Deleter{}, err } if checkFKs == CheckFKs { var err error rowDeleter.cascader, err = makeDeleteCascader(txn, tableDesc, fkTables, evalCtx, alloc) if err != nil { - return RowDeleter{}, err + return Deleter{}, err } } return rowDeleter, nil @@ -994,15 +885,15 @@ func MakeRowDeleter( // additional cascader. func makeRowDeleterWithoutCascader( txn *client.Txn, - tableDesc *TableDescriptor, + tableDesc *sqlbase.TableDescriptor, fkTables TableLookupsByID, - requestedCols []ColumnDescriptor, + requestedCols []sqlbase.ColumnDescriptor, checkFKs checkFKConstraints, - alloc *DatumAlloc, -) (RowDeleter, error) { + alloc *sqlbase.DatumAlloc, +) (Deleter, error) { indexes := tableDesc.Indexes if len(tableDesc.Mutations) > 0 { - indexes = make([]IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) + indexes = make([]sqlbase.IndexDescriptor, 0, len(tableDesc.Indexes)+len(tableDesc.Mutations)) indexes = append(indexes, tableDesc.Indexes...) 
for _, m := range tableDesc.Mutations { if index := m.GetIndex(); index != nil { @@ -1014,7 +905,7 @@ func makeRowDeleterWithoutCascader( fetchCols := requestedCols[:len(requestedCols):len(requestedCols)] fetchColIDtoRowIndex := ColIDtoRowIndexFromCols(fetchCols) - maybeAddCol := func(colID ColumnID) error { + maybeAddCol := func(colID sqlbase.ColumnID) error { if _, ok := fetchColIDtoRowIndex[colID]; !ok { col, err := tableDesc.FindColumnByID(colID) if err != nil { @@ -1027,24 +918,24 @@ func makeRowDeleterWithoutCascader( } for _, colID := range tableDesc.PrimaryIndex.ColumnIDs { if err := maybeAddCol(colID); err != nil { - return RowDeleter{}, err + return Deleter{}, err } } for _, index := range indexes { for _, colID := range index.ColumnIDs { if err := maybeAddCol(colID); err != nil { - return RowDeleter{}, err + return Deleter{}, err } } // The extra columns are needed to fix #14601. for _, colID := range index.ExtraColumnIDs { if err := maybeAddCol(colID); err != nil { - return RowDeleter{}, err + return Deleter{}, err } } } - rd := RowDeleter{ + rd := Deleter{ Helper: newRowHelper(tableDesc, indexes), FetchCols: fetchCols, FetchColIDtoRowIndex: fetchColIDtoRowIndex, @@ -1053,7 +944,7 @@ func makeRowDeleterWithoutCascader( var err error if rd.Fks, err = makeFKDeleteHelper(txn, *tableDesc, fkTables, fetchColIDtoRowIndex, alloc); err != nil { - return RowDeleter{}, err + return Deleter{}, err } } @@ -1064,7 +955,7 @@ func makeRowDeleterWithoutCascader( // with the given values. It also will cascade as required and check for // orphaned rows. The bytesMonitor is only used if cascading/fk checking and can // be nil if not. -func (rd *RowDeleter) DeleteRow( +func (rd *Deleter) DeleteRow( ctx context.Context, b *client.Batch, values []tree.Datum, @@ -1123,8 +1014,12 @@ func (rd *RowDeleter) DeleteRow( // DeleteIndexRow adds to the batch the kv operations necessary to delete a // table row from the given index. 
-func (rd *RowDeleter) DeleteIndexRow( - ctx context.Context, b *client.Batch, idx *IndexDescriptor, values []tree.Datum, traceKV bool, +func (rd *Deleter) DeleteIndexRow( + ctx context.Context, + b *client.Batch, + idx *sqlbase.IndexDescriptor, + values []tree.Datum, + traceKV bool, ) error { if rd.Fks.checker != nil { if err := rd.Fks.addAllIdxChecks(ctx, values); err != nil { @@ -1134,7 +1029,7 @@ func (rd *RowDeleter) DeleteIndexRow( return err } } - secondaryIndexEntry, err := EncodeSecondaryIndex( + secondaryIndexEntry, err := sqlbase.EncodeSecondaryIndex( rd.Helper.TableDesc, idx, rd.FetchColIDtoRowIndex, values) if err != nil { return err @@ -1152,8 +1047,8 @@ func (rd *RowDeleter) DeleteIndexRow( // ColIDtoRowIndexFromCols groups a slice of ColumnDescriptors by their ID // field, returning a map from ID to ColumnDescriptor. It assumes there are no // duplicate descriptors in the input. -func ColIDtoRowIndexFromCols(cols []ColumnDescriptor) map[ColumnID]int { - colIDtoRowIndex := make(map[ColumnID]int, len(cols)) +func ColIDtoRowIndexFromCols(cols []sqlbase.ColumnDescriptor) map[sqlbase.ColumnID]int { + colIDtoRowIndex := make(map[sqlbase.ColumnID]int, len(cols)) for i, col := range cols { colIDtoRowIndex[col.ID] = i } diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 2ded3b55476b..a0fa5657863f 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/distsqlrun" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -193,11 +194,11 @@ type scanRun struct { // only true when running SCRUB commands. 
isCheck bool - fetcher sqlbase.RowFetcher + fetcher row.Fetcher } func (n *scanNode) startExec(params runParams) error { - tableArgs := sqlbase.RowFetcherTableArgs{ + tableArgs := row.FetcherTableArgs{ Desc: n.desc, Index: n.index, ColIdxMap: n.colIdxMap, diff --git a/pkg/sql/scan_test.go b/pkg/sql/scan_test.go index 5c0f2939d029..791f943183df 100644 --- a/pkg/sql/scan_test.go +++ b/pkg/sql/scan_test.go @@ -24,8 +24,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" ) @@ -129,7 +129,7 @@ func TestScanBatches(t *testing.T) { defer leaktest.AfterTest(t)() // The test will screw around with KVBatchSize; make sure to restore it at the end. - restore := sqlbase.SetKVBatchSize(10) + restore := row.SetKVBatchSize(10) defer restore() s, db, _ := serverutils.StartServer( @@ -175,7 +175,7 @@ func TestScanBatches(t *testing.T) { numSpanValues := []int{0, 1, 2, 3} for _, batch := range batchSizes { - sqlbase.SetKVBatchSize(int64(batch)) + row.SetKVBatchSize(int64(batch)) for _, numSpans := range numSpanValues { testScanBatchQuery(t, db, numSpans, numAs, numBs, false) testScanBatchQuery(t, db, numSpans, numAs, numBs, true) diff --git a/pkg/sql/sqlbase/encoded_datum.go b/pkg/sql/sqlbase/encoded_datum.go index 700e7ea8386e..9d2ee158d4f9 100644 --- a/pkg/sql/sqlbase/encoded_datum.go +++ b/pkg/sql/sqlbase/encoded_datum.go @@ -72,6 +72,16 @@ func (ed *EncDatum) String(typ *ColumnType) string { return ed.stringWithAlloc(typ, nil) } +// BytesEqual is true if the EncDatum's encoded field is equal to the input. +func (ed *EncDatum) BytesEqual(b []byte) bool { + return bytes.Equal(ed.encoded, b) +} + +// EncodedString returns an immutable copy of this EncDatum's encoded field. 
+func (ed *EncDatum) EncodedString() string { + return string(ed.encoded) +} + // EncDatumOverhead is the overhead of EncDatum in bytes. const EncDatumOverhead = unsafe.Sizeof(EncDatum{}) diff --git a/pkg/sql/sqlbase/errors.go b/pkg/sql/sqlbase/errors.go index 9e7149d9ff0e..f2e11635265a 100644 --- a/pkg/sql/sqlbase/errors.go +++ b/pkg/sql/sqlbase/errors.go @@ -15,15 +15,9 @@ package sqlbase import ( - "context" - "fmt" - "strings" - - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util" ) // Cockroach error extensions: @@ -76,21 +70,6 @@ func NewNonNullViolationError(columnName string) error { return pgerror.NewErrorf(pgerror.CodeNotNullViolationError, "null value in column %q violates not-null constraint", columnName) } -// NewUniquenessConstraintViolationError creates an error that represents a -// violation of a UNIQUE constraint. -func NewUniquenessConstraintViolationError(index *IndexDescriptor, vals []tree.Datum) error { - valStrs := make([]string, 0, len(vals)) - for _, val := range vals { - valStrs = append(valStrs, val.String()) - } - - return pgerror.NewErrorf(pgerror.CodeUniqueViolationError, - "duplicate key value (%s)=(%s) violates unique constraint %q", - strings.Join(index.ColumnNames, ","), - strings.Join(valStrs, ","), - index.Name) -} - // IsUniquenessConstraintViolationError returns true if the error is for a // uniqueness constraint violation. func IsUniquenessConstraintViolationError(err error) bool { @@ -245,96 +224,3 @@ func errHasCode(err error, code ...string) bool { } return false } - -// singleKVFetcher is a kvFetcher that returns a single kv. -type singleKVFetcher struct { - kvs [1]roachpb.KeyValue - done bool -} - -// nextBatch implements the kvFetcher interface. 
-func (f *singleKVFetcher) nextBatch( - _ context.Context, -) (ok bool, kvs []roachpb.KeyValue, batchResponse []byte, numKvs int64, err error) { - if f.done { - return false, nil, nil, 0, nil - } - f.done = true - return true, f.kvs[:], nil, 0, nil -} - -// getRangesInfo implements the kvFetcher interface. -func (f *singleKVFetcher) getRangesInfo() []roachpb.RangeInfo { - panic("getRangesInfo() called on singleKVFetcher") -} - -// ConvertBatchError returns a user friendly constraint violation error. -func ConvertBatchError(ctx context.Context, tableDesc *TableDescriptor, b *client.Batch) error { - origPErr := b.MustPErr() - if origPErr.Index == nil { - return origPErr.GoError() - } - j := origPErr.Index.Index - if j >= int32(len(b.Results)) { - panic(fmt.Sprintf("index %d outside of results: %+v", j, b.Results)) - } - result := b.Results[j] - if cErr, ok := origPErr.GetDetail().(*roachpb.ConditionFailedError); ok && len(result.Rows) > 0 { - key := result.Rows[0].Key - // TODO(dan): There's too much internal knowledge of the sql table - // encoding here (and this callsite is the only reason - // DecodeIndexKeyPrefix is exported). Refactor this bit out. 
- indexID, _, err := DecodeIndexKeyPrefix(tableDesc, key) - if err != nil { - return err - } - index, err := tableDesc.FindIndexByID(indexID) - if err != nil { - return err - } - var rf RowFetcher - - var valNeededForCol util.FastIntSet - valNeededForCol.AddRange(0, len(index.ColumnIDs)-1) - - colIdxMap := make(map[ColumnID]int, len(index.ColumnIDs)) - cols := make([]ColumnDescriptor, len(index.ColumnIDs)) - for i, colID := range index.ColumnIDs { - colIdxMap[colID] = i - col, err := tableDesc.FindColumnByID(colID) - if err != nil { - return err - } - cols[i] = *col - } - - tableArgs := RowFetcherTableArgs{ - Desc: tableDesc, - Index: index, - ColIdxMap: colIdxMap, - IsSecondaryIndex: indexID != tableDesc.PrimaryIndex.ID, - Cols: cols, - ValNeededForCol: valNeededForCol, - } - if err := rf.Init( - false /* reverse */, false /* returnRangeInfo */, false /* isCheck */, &DatumAlloc{}, tableArgs, - ); err != nil { - return err - } - f := singleKVFetcher{kvs: [1]roachpb.KeyValue{{Key: key}}} - if cErr.ActualValue != nil { - f.kvs[0].Value = *cErr.ActualValue - } - // Use the RowFetcher to decode the single kv pair above by passing in - // this singleKVFetcher implementation, which doesn't actually hit KV. - if err := rf.StartScanFrom(ctx, &f); err != nil { - return err - } - datums, _, _, err := rf.NextRowDecoded(ctx) - if err != nil { - return err - } - return NewUniquenessConstraintViolationError(index, datums) - } - return origPErr.GoError() -} diff --git a/pkg/sql/sqlbase/index_encoding.go b/pkg/sql/sqlbase/index_encoding.go index 139b7f203db3..689adc49456a 100644 --- a/pkg/sql/sqlbase/index_encoding.go +++ b/pkg/sql/sqlbase/index_encoding.go @@ -467,63 +467,6 @@ func DecodeIndexKeyWithoutTableIDIndexIDPrefix( return key, true, nil } -// consumeIndexKeyWithoutTableIDIndexIDPrefix consumes an index key that's -// already pre-stripped of its table ID index ID prefix, up to nCols columns, -// returning the number of bytes consumed. 
For example, given an input key -// with values (6,7,8,9) such as /Table/60/1/6/7/#/61/1/8/9, stripping 3 columns -// from this key would eat all but the final, 4th column 9 in this example, -// producing /Table/60/1/6/7/#/61/1/8. If nCols was 2, instead, the result -// would include the trailing table ID index ID pair, since that's a more -// precise key: /Table/60/1/6/7/#/61/1. -func consumeIndexKeyWithoutTableIDIndexIDPrefix( - index *IndexDescriptor, nCols int, key []byte, -) (int, error) { - origKeyLen := len(key) - consumedCols := 0 - for _, ancestor := range index.Interleave.Ancestors { - length := int(ancestor.SharedPrefixLen) - // Skip up to length values. - for j := 0; j < length; j++ { - if consumedCols == nCols { - // We're done early, in the middle of an interleave. - return origKeyLen - len(key), nil - } - l, err := encoding.PeekLength(key) - if err != nil { - return 0, err - } - key = key[l:] - consumedCols++ - } - var ok bool - key, ok = encoding.DecodeIfInterleavedSentinel(key) - if !ok { - return 0, errors.New("unexpected lack of sentinel key") - } - - // Skip the TableID/IndexID pair for each ancestor except for the - // first, which has already been skipped in our input. - for j := 0; j < 2; j++ { - idLen, err := encoding.PeekLength(key) - if err != nil { - return 0, err - } - key = key[idLen:] - } - } - - // Decode the remaining values in the key, in the final interleave. - for ; consumedCols < nCols; consumedCols++ { - l, err := encoding.PeekLength(key) - if err != nil { - return 0, err - } - key = key[l:] - } - - return origKeyLen - len(key), nil -} - // DecodeKeyVals decodes the values that are part of the key. The decoded // values are stored in the vals. If this slice is nil, the direction // used will default to encoding.Ascending. 
diff --git a/pkg/sql/sqlbase/keys.go b/pkg/sql/sqlbase/keys.go index 271903405f1b..ef79a62c1bb8 100644 --- a/pkg/sql/sqlbase/keys.go +++ b/pkg/sql/sqlbase/keys.go @@ -15,6 +15,10 @@ package sqlbase import ( + "bytes" + "fmt" + "strings" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -46,3 +50,101 @@ func MakeDescMetadataKey(descID ID) roachpb.Key { k = encoding.EncodeUvarintAscending(k, uint64(descID)) return keys.MakeFamilyKey(k, uint32(DescriptorTable.Columns[1].ID)) } + +// IndexKeyValDirs returns the corresponding encoding.Directions for all the +// encoded values in index's "fullest" possible index key, including directions +// for table/index IDs, the interleaved sentinel and the index column values. +// For example, given +// CREATE INDEX foo ON bar (a, b DESC) INTERLEAVED IN PARENT bar (a) +// a typical index key with all values specified could be +// /51/1/42/#/51/2/1337 +// which would return the slice +// {ASC, ASC, ASC, 0, ASC, ASC, DESC} +func IndexKeyValDirs(index *IndexDescriptor) []encoding.Direction { + if index == nil { + return nil + } + + dirs := make([]encoding.Direction, 0, (len(index.Interleave.Ancestors)+1)*2+len(index.ColumnDirections)) + + colIdx := 0 + for _, ancs := range index.Interleave.Ancestors { + // Table/Index IDs are always encoded ascending. + dirs = append(dirs, encoding.Ascending, encoding.Ascending) + for i := 0; i < int(ancs.SharedPrefixLen); i++ { + d, err := index.ColumnDirections[colIdx].ToEncodingDirection() + if err != nil { + panic(err) + } + dirs = append(dirs, d) + colIdx++ + } + + // The interleaved sentinel uses the 0 value for + // encoding.Direction when pretty-printing (see + // encoding.go:prettyPrintFirstValue). + dirs = append(dirs, 0) + } + + // The index's table/index ID. 
+ dirs = append(dirs, encoding.Ascending, encoding.Ascending) + + for colIdx < len(index.ColumnDirections) { + d, err := index.ColumnDirections[colIdx].ToEncodingDirection() + if err != nil { + panic(err) + } + dirs = append(dirs, d) + colIdx++ + } + + return dirs +} + +// PrettyKey pretty-prints the specified key, skipping over the first `skip` +// fields. The pretty printed key looks like: +// +// /Table///... +// +// We always strip off the /Table prefix and then `skip` more fields. Note that +// this assumes that the fields themselves do not contain '/', but that is +// currently true for the fields we care about stripping (the table and index +// ID). +func PrettyKey(valDirs []encoding.Direction, key roachpb.Key, skip int) string { + p := key.StringWithDirs(valDirs) + for i := 0; i <= skip; i++ { + n := strings.IndexByte(p[1:], '/') + if n == -1 { + return "" + } + p = p[n+1:] + } + return p +} + +// PrettySpan returns a human-readable representation of a span. +func PrettySpan(valDirs []encoding.Direction, span roachpb.Span, skip int) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s-%s", PrettyKey(valDirs, span.Key, skip), PrettyKey(valDirs, span.EndKey, skip)) + return buf.String() +} + +// PrettySpans returns a human-readable description of the spans. +// If index is nil, then pretty print subroutines will use their default +// settings. 
+func PrettySpans(index *IndexDescriptor, spans []roachpb.Span, skip int) string { + if len(spans) == 0 { + return "" + } + + valDirs := IndexKeyValDirs(index) + + var buf bytes.Buffer + for i, span := range spans { + if i > 0 { + buf.WriteString(" ") + } + buf.WriteString(PrettySpan(valDirs, span, skip)) + } + return buf.String() +} diff --git a/pkg/sql/sqlbase/row_container.go b/pkg/sql/sqlbase/row_container.go index 102e9617376c..0e11c6961d6c 100644 --- a/pkg/sql/sqlbase/row_container.go +++ b/pkg/sql/sqlbase/row_container.go @@ -120,7 +120,9 @@ func (ti ColTypeInfo) Type(idx int) types.T { return ti.colTypes[idx].ToDatumType() } -func makeColTypeInfo( +// MakeColTypeInfo returns a ColTypeInfo initialized from the given +// TableDescriptor and map from column ID to row index. +func MakeColTypeInfo( tableDesc *TableDescriptor, colIDToRowIndex map[ColumnID]int, ) (ColTypeInfo, error) { colTypeInfo := ColTypeInfo{ diff --git a/pkg/sql/tablewriter.go b/pkg/sql/tablewriter.go index 77289538fd76..bc8c2f646aac 100644 --- a/pkg/sql/tablewriter.go +++ b/pkg/sql/tablewriter.go @@ -18,6 +18,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -76,7 +77,7 @@ type tableWriter interface { tableDesc() *sqlbase.TableDescriptor // fkSpanCollector returns the FkSpanCollector for the tableWriter. - fkSpanCollector() sqlbase.FkSpanCollector + fkSpanCollector() row.FkSpanCollector // close frees all resources held by the tableWriter. 
close(context.Context) @@ -139,7 +140,7 @@ func (tb *tableWriterBase) flushAndStartNewBatch( ctx context.Context, tableDesc *sqlbase.TableDescriptor, ) error { if err := tb.txn.Run(ctx, tb.b); err != nil { - return sqlbase.ConvertBatchError(ctx, tableDesc, tb.b) + return row.ConvertBatchError(ctx, tableDesc, tb.b) } tb.b = tb.txn.NewBatch() tb.batchSize = 0 @@ -163,7 +164,7 @@ func (tb *tableWriterBase) finalize( } if err != nil { - return sqlbase.ConvertBatchError(ctx, tableDesc, tb.b) + return row.ConvertBatchError(ctx, tableDesc, tb.b) } return nil } diff --git a/pkg/sql/tablewriter_delete.go b/pkg/sql/tablewriter_delete.go index 3e37af2ba3a4..fefcdf0380d4 100644 --- a/pkg/sql/tablewriter_delete.go +++ b/pkg/sql/tablewriter_delete.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" @@ -33,7 +34,7 @@ import ( type tableDeleter struct { tableWriterBase - rd sqlbase.RowDeleter + rd row.Deleter alloc *sqlbase.DatumAlloc } @@ -65,7 +66,7 @@ func (td *tableDeleter) row( ctx context.Context, values tree.Datums, traceKV bool, ) (tree.Datums, error) { td.batchSize++ - return nil, td.rd.DeleteRow(ctx, td.b, values, sqlbase.CheckFKs, traceKV) + return nil, td.rd.DeleteRow(ctx, td.b, values, row.CheckFKs, traceKV) } // fastPathAvailable returns true if the fastDelete optimization can be used. 
@@ -202,8 +203,8 @@ func (td *tableDeleter) deleteAllRowsScan( valNeededForCol.Add(idx) } - var rf sqlbase.RowFetcher - tableArgs := sqlbase.RowFetcherTableArgs{ + var rf row.Fetcher + tableArgs := row.FetcherTableArgs{ Desc: td.rd.Helper.TableDesc, Index: &td.rd.Helper.TableDesc.PrimaryIndex, ColIdxMap: td.rd.FetchColIDtoRowIndex, @@ -313,8 +314,8 @@ func (td *tableDeleter) deleteIndexScan( valNeededForCol.Add(idx) } - var rf sqlbase.RowFetcher - tableArgs := sqlbase.RowFetcherTableArgs{ + var rf row.Fetcher + tableArgs := row.FetcherTableArgs{ Desc: td.rd.Helper.TableDesc, Index: &td.rd.Helper.TableDesc.PrimaryIndex, ColIdxMap: td.rd.FetchColIDtoRowIndex, @@ -356,7 +357,7 @@ func (td *tableDeleter) tableDesc() *sqlbase.TableDescriptor { return td.rd.Helper.TableDesc } -func (td *tableDeleter) fkSpanCollector() sqlbase.FkSpanCollector { +func (td *tableDeleter) fkSpanCollector() row.FkSpanCollector { return td.rd.Fks } diff --git a/pkg/sql/tablewriter_insert.go b/pkg/sql/tablewriter_insert.go index efcfb55991c2..a8101177131e 100644 --- a/pkg/sql/tablewriter_insert.go +++ b/pkg/sql/tablewriter_insert.go @@ -18,6 +18,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -25,7 +26,7 @@ import ( // tableInserter handles writing kvs and forming table rows for inserts. type tableInserter struct { tableWriterBase - ri sqlbase.RowInserter + ri row.Inserter } // init is part of the tableWriter interface. @@ -39,7 +40,7 @@ func (ti *tableInserter) row( ctx context.Context, values tree.Datums, traceKV bool, ) (tree.Datums, error) { ti.batchSize++ - return nil, ti.ri.InsertRow(ctx, ti.b, values, false, sqlbase.CheckFKs, traceKV) + return nil, ti.ri.InsertRow(ctx, ti.b, values, false, row.CheckFKs, traceKV) } // atBatchEnd is part of the extendedTableWriter interface. 
@@ -63,7 +64,7 @@ func (ti *tableInserter) tableDesc() *sqlbase.TableDescriptor { } // fkSpanCollector is part of the tableWriter interface. -func (ti *tableInserter) fkSpanCollector() sqlbase.FkSpanCollector { +func (ti *tableInserter) fkSpanCollector() row.FkSpanCollector { return ti.ri.Fks } diff --git a/pkg/sql/tablewriter_update.go b/pkg/sql/tablewriter_update.go index c02b1b7fb77e..222a4f1911b0 100644 --- a/pkg/sql/tablewriter_update.go +++ b/pkg/sql/tablewriter_update.go @@ -18,6 +18,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -25,7 +26,7 @@ import ( // tableUpdater handles writing kvs and forming table rows for updates. type tableUpdater struct { tableWriterBase - ru sqlbase.RowUpdater + ru row.Updater } // init is part of the tableWriter interface. @@ -47,7 +48,7 @@ func (tu *tableUpdater) rowForUpdate( ctx context.Context, oldValues, updateValues tree.Datums, traceKV bool, ) (tree.Datums, error) { tu.batchSize++ - return tu.ru.UpdateRow(ctx, tu.b, oldValues, updateValues, sqlbase.CheckFKs, traceKV) + return tu.ru.UpdateRow(ctx, tu.b, oldValues, updateValues, row.CheckFKs, traceKV) } // atBatchEnd is part of the extendedTableWriter interface. @@ -71,7 +72,7 @@ func (tu *tableUpdater) tableDesc() *sqlbase.TableDescriptor { } // fkSpanCollector is part of the tableWriter interface. 
-func (tu *tableUpdater) fkSpanCollector() sqlbase.FkSpanCollector { +func (tu *tableUpdater) fkSpanCollector() row.FkSpanCollector { return tu.ru.Fks } diff --git a/pkg/sql/tablewriter_upsert.go b/pkg/sql/tablewriter_upsert.go index 2a0aa08eebda..520ed1eed99b 100644 --- a/pkg/sql/tablewriter_upsert.go +++ b/pkg/sql/tablewriter_upsert.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util" @@ -30,7 +31,7 @@ import ( type tableUpserterBase struct { tableWriterBase - ri sqlbase.RowInserter + ri row.Inserter alloc *sqlbase.DatumAlloc // Should we collect the rows for a RETURNING clause? @@ -180,7 +181,7 @@ func (tu *tableUpserterBase) makeResultFromRow( } // fkSpanCollector is part of the tableWriter interface. -func (tu *tableUpserterBase) fkSpanCollector() sqlbase.FkSpanCollector { +func (tu *tableUpserterBase) fkSpanCollector() row.FkSpanCollector { return tu.ri.Fks } @@ -248,12 +249,12 @@ type tableUpserter struct { updateValues tree.Datums // Set by init. - fkTables sqlbase.TableLookupsByID // for fk checks in update case - ru sqlbase.RowUpdater + fkTables row.TableLookupsByID // for fk checks in update case + ru row.Updater updateColIDtoRowIndex map[sqlbase.ColumnID]int fetchCols []sqlbase.ColumnDescriptor fetchColIDtoRowIndex map[sqlbase.ColumnID]int - fetcher sqlbase.RowFetcher + fetcher row.Fetcher } // init is part of the tableWriter interface. 
@@ -273,15 +274,15 @@ func (tu *tableUpserter) init(txn *client.Txn, evalCtx *tree.EvalContext) error if len(tu.updateCols) == 0 { tu.fetchCols = requestedCols - tu.fetchColIDtoRowIndex = sqlbase.ColIDtoRowIndexFromCols(requestedCols) + tu.fetchColIDtoRowIndex = row.ColIDtoRowIndexFromCols(requestedCols) } else { - tu.ru, err = sqlbase.MakeRowUpdater( + tu.ru, err = row.MakeUpdater( txn, tableDesc, tu.fkTables, tu.updateCols, requestedCols, - sqlbase.RowUpdaterDefault, + row.UpdaterDefault, evalCtx, tu.alloc, ) @@ -306,7 +307,7 @@ func (tu *tableUpserter) init(txn *client.Txn, evalCtx *tree.EvalContext) error } } - tableArgs := sqlbase.RowFetcherTableArgs{ + tableArgs := row.FetcherTableArgs{ Desc: tableDesc, Index: &tableDesc.PrimaryIndex, ColIdxMap: tu.fetchColIDtoRowIndex, @@ -542,7 +543,7 @@ func (tu *tableUpserter) updateConflictingRow( // containing the updated values for every column in the // table. This is useful for RETURNING, which we collect below. updatedRow, err := tu.ru.UpdateRow( - ctx, b, conflictingRowValues, updateValues, sqlbase.CheckFKs, traceKV, + ctx, b, conflictingRowValues, updateValues, row.CheckFKs, traceKV, ) if err != nil { return nil, nil, err @@ -634,7 +635,7 @@ func (tu *tableUpserter) insertNonConflictingRow( ) (resultRow tree.Datums, newExistingRows []tree.Datums, err error) { // Perform the insert proper. 
if err := tu.ri.InsertRow( - ctx, b, insertRow, false /* ignoreConflicts */, sqlbase.CheckFKs, traceKV); err != nil { + ctx, b, insertRow, false /* ignoreConflicts */, row.CheckFKs, traceKV); err != nil { return nil, nil, err } diff --git a/pkg/sql/tablewriter_upsert_fast.go b/pkg/sql/tablewriter_upsert_fast.go index af3b0be9d5d7..4841f8e92741 100644 --- a/pkg/sql/tablewriter_upsert_fast.go +++ b/pkg/sql/tablewriter_upsert_fast.go @@ -18,8 +18,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) // fastTableUpserter implements the fast path for an upsert. See @@ -44,12 +44,12 @@ func (tu *fastTableUpserter) init(txn *client.Txn, _ *tree.EvalContext) error { // row is part of the tableWriter interface. func (tu *fastTableUpserter) row( - ctx context.Context, row tree.Datums, traceKV bool, + ctx context.Context, d tree.Datums, traceKV bool, ) (tree.Datums, error) { tu.batchSize++ // Use the fast path, ignore conflicts. return nil, tu.ri.InsertRow( - ctx, tu.b, row, true /* ignoreConflicts */, sqlbase.CheckFKs, traceKV) + ctx, tu.b, d, true /* ignoreConflicts */, row.CheckFKs, traceKV) } // batchedCount is part of the batchedTableWriter interface. 
diff --git a/pkg/sql/tablewriter_upsert_strict.go b/pkg/sql/tablewriter_upsert_strict.go index e4d6e148f637..d25ffb64a9cd 100644 --- a/pkg/sql/tablewriter_upsert_strict.go +++ b/pkg/sql/tablewriter_upsert_strict.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -62,7 +63,7 @@ func (tu *strictTableUpserter) atBatchEnd(ctx context.Context, traceKV bool) err continue } - if err := tu.ri.InsertRow(ctx, tu.b, insertRow, true, sqlbase.CheckFKs, traceKV); err != nil { + if err := tu.ri.InsertRow(ctx, tu.b, insertRow, true, row.CheckFKs, traceKV); err != nil { return err } diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 2185ba709da3..d70c0c691da3 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -396,14 +397,14 @@ func truncateTableInChunks( const chunkSize = TableTruncateChunkSize var resume roachpb.Span alloc := &sqlbase.DatumAlloc{} - for row, done := 0, false; !done; row += chunkSize { + for rowIdx, done := 0, false; !done; rowIdx += chunkSize { resumeAt := resume if traceKV { - log.VEventf(ctx, 2, "table %s truncate at row: %d, span: %s", tableDesc.Name, row, resume) + log.VEventf(ctx, 2, "table %s truncate at row: %d, span: %s", tableDesc.Name, rowIdx, resume) } if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { - rd, err := sqlbase.MakeRowDeleter( - txn, tableDesc, nil, 
nil, sqlbase.SkipFKs, nil /* *tree.EvalContext */, alloc, + rd, err := row.MakeDeleter( + txn, tableDesc, nil, nil, row.SkipFKs, nil /* *tree.EvalContext */, alloc, ) if err != nil { return err diff --git a/pkg/sql/update.go b/pkg/sql/update.go index 5b9cc6f0f821..f28a2a144d46 100644 --- a/pkg/sql/update.go +++ b/pkg/sql/update.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -98,10 +99,10 @@ func (p *planner) Update( } // Determine what are the foreign key tables that are involved in the update. - fkTables, err := sqlbase.TablesNeededForFKs( + fkTables, err := row.TablesNeededForFKs( ctx, *desc, - sqlbase.CheckUpdates, + row.CheckUpdates, p.LookupTableByID, p.CheckPrivilege, p.analyzeExpr, @@ -206,16 +207,16 @@ func (p *planner) Update( } // Create the table updater, which does the bulk of the work. - // As a result of MakeRowUpdater, ru.FetchCols include all the + // As a result of MakeUpdater, ru.FetchCols include all the // columns in the table descriptor + any columns currently in the // process of being added. - ru, err := sqlbase.MakeRowUpdater( + ru, err := row.MakeUpdater( p.txn, desc, fkTables, updateCols, requestedCols, - sqlbase.RowUpdaterDefault, + row.UpdaterDefault, p.EvalContext(), &p.alloc, ) @@ -280,7 +281,7 @@ func (p *planner) Update( // visible. // We do not want these to be available for RETURNING below. // - // MakeRowUpdater guarantees that the first columns of the source + // MakeUpdater guarantees that the first columns of the source // are those specified in requestedCols, which, in the case where // rowsNeeded is true, is also desc.Columns. So we can truncate to // the length of that to only see public columns. 
diff --git a/pkg/sql/upsert.go b/pkg/sql/upsert.go index ac6143d12ce7..4ff653ba6720 100644 --- a/pkg/sql/upsert.go +++ b/pkg/sql/upsert.go @@ -19,6 +19,7 @@ import ( "fmt" "sync" + "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -50,7 +51,7 @@ func (p *planner) newUpsertNode( ctx context.Context, n *tree.Insert, desc *sqlbase.TableDescriptor, - ri sqlbase.RowInserter, + ri row.Inserter, tn, alias *tree.TableName, sourceRows planNode, needRows bool, @@ -58,7 +59,7 @@ func (p *planner) newUpsertNode( defaultExprs []tree.TypedExpr, computeExprs []tree.TypedExpr, computedCols []sqlbase.ColumnDescriptor, - fkTables sqlbase.TableLookupsByID, + fkTables row.TableLookupsByID, desiredTypes []types.T, ) (res batchedPlanNode, err error) { // Extract the index that will detect upsert conflicts @@ -657,7 +658,7 @@ func (p *planner) newUpsertHelper( // column IDs to row datum positions is straightforward. helper.ccIvarContainer = sqlbase.RowIndexedVarContainer{ Cols: tableDesc.Columns, - Mapping: sqlbase.ColIDtoRowIndexFromCols(tableDesc.Columns), + Mapping: row.ColIDtoRowIndexFromCols(tableDesc.Columns), } return helper, nil