diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go
index 38f8d8af78f4..0514914288a3 100644
--- a/pkg/ccl/backupccl/backup_job.go
+++ b/pkg/ccl/backupccl/backup_job.go
@@ -37,7 +37,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
 	"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
 	"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/uuid"
@@ -186,7 +185,7 @@ func backup(
 	g := ctxgroup.WithContext(ctx)
 	pkIDs := make(map[uint64]bool)
 	for i := range backupManifest.Descriptors {
-		if t := descpb.TableFromDescriptor(&backupManifest.Descriptors[i], hlc.Timestamp{}); t != nil {
+		if t, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); t != nil {
 			pkIDs[roachpb.BulkOpSummaryID(uint64(t.ID), uint64(t.PrimaryIndex.ID))] = true
 		}
 	}
@@ -327,7 +326,7 @@ func backup(
 		}
 		var tableStatistics []*stats.TableStatisticProto
 		for i := range backupManifest.Descriptors {
-			if tableDesc := descpb.TableFromDescriptor(&backupManifest.Descriptors[i], hlc.Timestamp{}); tableDesc != nil {
+			if tableDesc, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[i]); tableDesc != nil {
 				// Collect all the table stats for this table.
 				tableStatisticsAcc, err := statsCache.GetTableStats(ctx, tableDesc.GetID())
 				if err != nil {
diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go
index 360303768d91..2922851ef1c0 100644
--- a/pkg/ccl/backupccl/backup_planning.go
+++ b/pkg/ccl/backupccl/backup_planning.go
@@ -332,9 +332,9 @@ func spansForAllTableIndexes(
 			// entire interval. DROPPED tables should never later become PUBLIC.
 			// TODO(pbardea): Consider and test the interaction between revision_history
 			// backups and OFFLINE tables.
-			rawTbl := descpb.TableFromDescriptor(rev.Desc, hlc.Timestamp{})
+			rawTbl, _, _, _ := descpb.FromDescriptor(rev.Desc)
 			if rawTbl != nil && rawTbl.State != descpb.DescriptorState_DROP {
-				tbl := tabledesc.NewImmutable(*rawTbl)
+				tbl := tabledesc.NewBuilder(rawTbl).BuildImmutableTable()
 				revSpans, err := getLogicallyMergedTableSpans(tbl, added, execCfg.Codec, rev.Time,
 					checkForKVInBounds)
 				if err != nil {
@@ -996,7 +996,7 @@ func backupPlanHook(
 			dbsInPrev := make(map[descpb.ID]struct{})
 			rawDescs := prevBackups[len(prevBackups)-1].Descriptors
 			for i := range rawDescs {
-				if t := descpb.TableFromDescriptor(&rawDescs[i], hlc.Timestamp{}); t != nil {
+				if t, _, _, _ := descpb.FromDescriptor(&rawDescs[i]); t != nil {
 					tablesInPrev[t.ID] = struct{}{}
 				}
 			}
diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go
index 127853f3aa5d..9b937ec36c40 100644
--- a/pkg/ccl/backupccl/backup_test.go
+++ b/pkg/ccl/backupccl/backup_test.go
@@ -6058,7 +6058,7 @@ func getMockTableDesc(
 		PrimaryIndex: pkIndex,
 		Indexes:      indexes,
 	}
-	return tabledesc.NewImmutable(mockTableDescriptor)
+	return tabledesc.NewBuilder(&mockTableDescriptor).BuildImmutableTable()
 }
 
 // Unit tests for the getLogicallyMergedTableSpans() method.
diff --git a/pkg/ccl/backupccl/backupbase/BUILD.bazel b/pkg/ccl/backupccl/backupbase/BUILD.bazel
index 0fa7f0c5d1b4..faaed6c0bd8c 100644
--- a/pkg/ccl/backupccl/backupbase/BUILD.bazel
+++ b/pkg/ccl/backupccl/backupbase/BUILD.bazel
@@ -44,6 +44,7 @@ go_test(
         "//pkg/security/securitytest",
         "//pkg/server",
         "//pkg/sql/catalog",
+        "//pkg/sql/catalog/catalogkv",
        "//pkg/sql/catalog/dbdesc",
         "//pkg/sql/catalog/descpb",
         "//pkg/sql/catalog/schemadesc",
diff --git a/pkg/ccl/backupccl/backupbase/targets.go b/pkg/ccl/backupccl/backupbase/targets.go
index 21df4b0c4f4f..1fe596249f0f 100644
--- a/pkg/ccl/backupccl/backupbase/targets.go
+++ b/pkg/ccl/backupccl/backupbase/targets.go
@@ -560,7 +560,7 @@ func FullClusterTargets(
 	for _, desc := range allDescs {
 		switch desc := desc.(type) {
 		case catalog.DatabaseDescriptor:
-			dbDesc := dbdesc.NewImmutable(*desc.DatabaseDesc())
+			dbDesc := dbdesc.NewBuilder(desc.DatabaseDesc()).BuildImmutableDatabase()
 			fullClusterDescs = append(fullClusterDescs, desc)
 			if dbDesc.GetID() != systemschema.SystemDB.GetID() {
 				// The only database that isn't being fully backed up is the system DB.
diff --git a/pkg/ccl/backupccl/backupbase/targets_test.go b/pkg/ccl/backupccl/backupbase/targets_test.go
index af2e972eae7c..b5ead834deb9 100644
--- a/pkg/ccl/backupccl/backupbase/targets_test.go
+++ b/pkg/ccl/backupccl/backupbase/targets_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/security"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
+	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
@@ -48,23 +49,22 @@ func TestDescriptorsMatchingTargets(t *testing.T) {
 	type tbDesc = descpb.TableDescriptor
 	type typDesc = descpb.TypeDescriptor
 	ts1 := hlc.Timestamp{WallTime: 1}
-	mkTable := func(descriptor tbDesc) catalog.TableDescriptor {
-		desc := tabledesc.NewImmutable(descriptor)
-		desc.TableDesc().ModificationTime = ts1
-		return desc
+	mkTable := func(descriptor tbDesc) catalog.Descriptor {
+		descProto := tabledesc.NewBuilder(&descriptor).BuildImmutable().DescriptorProto()
+		return catalogkv.NewBuilderWithMVCCTimestamp(descProto, ts1).BuildImmutable()
 	}
-	mkDB := func(id descpb.ID, name string) *dbdesc.Immutable {
+	mkDB := func(id descpb.ID, name string) catalog.Descriptor {
 		return &dbdesc.NewInitial(id, name, security.AdminRoleName()).Immutable
 	}
-	mkTyp := func(desc typDesc) *typedesc.Immutable {
+	mkTyp := func(desc typDesc) catalog.Descriptor {
 		// Set a default parent schema for the type descriptors.
 		if desc.ParentSchemaID == descpb.InvalidID {
 			desc.ParentSchemaID = keys.PublicSchemaID
 		}
-		return typedesc.NewImmutable(desc)
+		return typedesc.NewBuilder(&desc).BuildImmutable()
 	}
-	mkSchema := func(desc scDesc) *schemadesc.Immutable {
-		return schemadesc.NewImmutable(desc)
+	mkSchema := func(desc scDesc) catalog.Descriptor {
+		return schemadesc.NewBuilder(&desc).BuildImmutable()
 	}
 	toOid := typedesc.TypeIDToOID
 	typeExpr := "'hello'::@100015 = 'hello'::@100015"
diff --git a/pkg/ccl/backupccl/manifest_handling.go b/pkg/ccl/backupccl/manifest_handling.go
index 1f80e8e1ed8f..39701989a976 100644
--- a/pkg/ccl/backupccl/manifest_handling.go
+++ b/pkg/ccl/backupccl/manifest_handling.go
@@ -835,7 +835,7 @@ func loadSQLDescsFromBackupsAtTime(
 	unwrapDescriptors := func(raw []descpb.Descriptor) []catalog.Descriptor {
 		ret := make([]catalog.Descriptor, 0, len(raw))
 		for i := range raw {
-			ret = append(ret, catalogkv.UnwrapDescriptorRaw(context.TODO(), &raw[i]))
+			ret = append(ret, catalogkv.NewBuilder(&raw[i]).BuildExistingMutable())
 		}
 		return ret
 	}
@@ -869,7 +869,7 @@ func loadSQLDescsFromBackupsAtTime(
 	for _, raw := range byID {
 		// A revision may have been captured before it was in a DB that is
 		// backed up -- if the DB is missing, filter the object.
-		desc := catalogkv.UnwrapDescriptorRaw(context.TODO(), raw)
+		desc := catalogkv.NewBuilder(raw).BuildExistingMutable()
 		var isObject bool
 		switch desc.(type) {
 		case catalog.TableDescriptor, catalog.TypeDescriptor, catalog.SchemaDescriptor:
diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go
index 8c154a1c130c..afb6d7f41161 100644
--- a/pkg/ccl/backupccl/restore_job.go
+++ b/pkg/ccl/backupccl/restore_job.go
@@ -444,16 +444,7 @@ func WriteDescriptors(
 			}
 			return err
 		}
-
-		bdg := catalogkv.NewOneLevelUncachedDescGetter(txn, codec)
-		descs := make([]catalog.Descriptor, 0, len(databases)+len(tables))
-		for _, table := range tables {
-			descs = append(descs, table)
-		}
-		for _, db := range databases {
-			descs = append(descs, db)
-		}
-		return catalog.ValidateSelfAndCrossReferences(ctx, bdg, descs...)
+		return nil
 	}()
 	return errors.Wrapf(err, "restoring table desc and namespace entries")
 }
@@ -882,9 +873,9 @@ func spansForAllRestoreTableIndexes(
 			// entire interval. DROPPED tables should never later become PUBLIC.
 			// TODO(pbardea): Consider and test the interaction between revision_history
 			// backups and OFFLINE tables.
-			rawTbl := descpb.TableFromDescriptor(rev.Desc, hlc.Timestamp{})
+			rawTbl, _, _, _ := descpb.FromDescriptor(rev.Desc)
 			if rawTbl != nil && rawTbl.State != descpb.DescriptorState_DROP {
-				tbl := tabledesc.NewImmutable(*rawTbl)
+				tbl := tabledesc.NewBuilder(rawTbl).BuildImmutableTable()
 				for _, idx := range tbl.NonDropIndexes() {
 					key := tableAndIndex{tableID: tbl.GetID(), indexID: idx.GetID()}
 					if !added[key] {
@@ -949,7 +940,7 @@ func createImportingDescriptors(
 	for _, desc := range sqlDescs {
 		switch desc := desc.(type) {
 		case catalog.TableDescriptor:
-			mut := tabledesc.NewCreatedMutable(*desc.TableDesc())
+			mut := tabledesc.NewBuilder(desc.TableDesc()).BuildCreatedMutableTable()
 			if shouldPreRestore(mut) {
 				preRestoreTables = append(preRestoreTables, mut)
 			} else {
@@ -960,15 +951,15 @@ func createImportingDescriptors(
 			oldTableIDs = append(oldTableIDs, mut.GetID())
 		case catalog.DatabaseDescriptor:
 			if _, ok := details.DescriptorRewrites[desc.GetID()]; ok {
-				mut := dbdesc.NewCreatedMutable(*desc.DatabaseDesc())
+				mut := dbdesc.NewBuilder(desc.DatabaseDesc()).BuildCreatedMutableDatabase()
 				databases = append(databases, mut)
 				mutableDatabases = append(mutableDatabases, mut)
 			}
 		case catalog.SchemaDescriptor:
-			mut := schemadesc.NewCreatedMutable(*desc.SchemaDesc())
+			mut := schemadesc.NewBuilder(desc.SchemaDesc()).BuildCreatedMutableSchema()
 			schemas = append(schemas, mut)
 		case catalog.TypeDescriptor:
-			mut := typedesc.NewCreatedMutable(*desc.TypeDesc())
+			mut := typedesc.NewBuilder(desc.TypeDesc()).BuildCreatedMutableType()
 			types = append(types, mut)
 		}
 	}
@@ -1946,9 +1937,9 @@ func (r *restoreResumer) dropDescriptors(
 
 	// Delete any schema descriptors that this restore created. Also collect the
 	// descriptors so we can update their parent databases later.
-	dbsWithDeletedSchemas := make(map[descpb.ID][]*descpb.SchemaDescriptor)
+	dbsWithDeletedSchemas := make(map[descpb.ID][]catalog.SchemaDescriptor)
 	for _, schemaDesc := range details.SchemaDescs {
-		sc := schemadesc.NewMutableExisting(*schemaDesc)
+		sc := schemadesc.NewBuilder(schemaDesc).BuildImmutableSchema()
 		// We need to ignore descriptors we just added since we haven't committed the txn that deletes these.
 		isSchemaEmpty, err := isSchemaEmpty(ctx, txn, sc.GetID(), allDescs, ignoredChildDescIDs)
 		if err != nil {
@@ -1963,19 +1954,19 @@ func (r *restoreResumer) dropDescriptors(
 			ctx,
 			b,
 			codec,
-			sc.ParentID,
+			sc.GetParentID(),
 			keys.RootNamespaceID,
-			sc.Name,
+			sc.GetName(),
 			false, /* kvTrace */
 		)
-		b.Del(catalogkeys.MakeDescMetadataKey(codec, sc.ID))
-		dbsWithDeletedSchemas[sc.GetParentID()] = append(dbsWithDeletedSchemas[sc.GetParentID()], sc.SchemaDesc())
+		b.Del(catalogkeys.MakeDescMetadataKey(codec, sc.GetID()))
+		dbsWithDeletedSchemas[sc.GetParentID()] = append(dbsWithDeletedSchemas[sc.GetParentID()], sc)
 	}
 
 	// Delete the database descriptors.
 	deletedDBs := make(map[descpb.ID]struct{})
 	for _, dbDesc := range details.DatabaseDescs {
-		db := dbdesc.NewExistingMutable(*dbDesc)
+		db := dbdesc.NewBuilder(dbDesc).BuildExistingMutable()
 		// We need to ignore descriptors we just added since we haven't committed the txn that deletes these.
 		isDBEmpty, err := isDatabaseEmpty(ctx, txn, db.GetID(), allDescs, ignoredChildDescIDs)
 		if err != nil {
@@ -2043,7 +2034,7 @@ func (r *restoreResumer) removeExistingTypeBackReferences(
 	existingTypes := make(map[descpb.ID]*typedesc.Mutable)
 	for i := range details.TypeDescs {
 		typ := details.TypeDescs[i]
-		restoredTypes[typ.ID] = typedesc.NewImmutable(*typ)
+		restoredTypes[typ.ID] = typedesc.NewBuilder(typ).BuildImmutableType()
 	}
 	for _, tbl := range restoredTables {
 		lookup := func(id descpb.ID) (catalog.TypeDescriptor, error) {
@@ -2114,15 +2105,7 @@ func getRestoringPrivileges(
 	user security.SQLUsername,
 	wroteDBs map[descpb.ID]catalog.DatabaseDescriptor,
 	descCoverage tree.DescriptorCoverage,
-) (*descpb.PrivilegeDescriptor, error) {
-	// Don't update the privileges of descriptors if we're doing a cluster
-	// restore.
-	if descCoverage == tree.AllDescriptors {
-		return nil, nil
-	}
-
-	var updatedPrivileges *descpb.PrivilegeDescriptor
-
+) (updatedPrivileges *descpb.PrivilegeDescriptor, err error) {
 	switch desc := desc.(type) {
 	case catalog.TableDescriptor, catalog.SchemaDescriptor:
 		if wrote, ok := wroteDBs[desc.GetParentID()]; ok {
@@ -2130,14 +2113,13 @@ func getRestoringPrivileges(
 			// table and schema should be that of the parent DB.
 			//
 			// Leave the privileges of the temp system tables as the default too.
-			if descCoverage != tree.AllDescriptors || wrote.GetName() == restoreTempSystemDB {
+			if descCoverage == tree.RequestedDescriptors || wrote.GetName() == restoreTempSystemDB {
 				updatedPrivileges = wrote.GetPrivileges()
 			}
-		} else {
+		} else if descCoverage == tree.RequestedDescriptors {
 			parentDB, err := catalogkv.MustGetDatabaseDescByID(ctx, txn, codec, desc.GetParentID())
 			if err != nil {
-				return nil, errors.Wrapf(err,
-					"failed to lookup parent DB %d", errors.Safe(desc.GetParentID()))
+				return nil, errors.Wrapf(err, "failed to lookup parent DB %d", errors.Safe(desc.GetParentID()))
 			}
 
 			// Default is to copy privs from restoring parent db, like CREATE {TABLE,
@@ -2147,10 +2129,12 @@ func getRestoringPrivileges(
 			updatedPrivileges = sql.CreateInheritedPrivilegesFromDBDesc(parentDB, user)
 		}
 	case catalog.TypeDescriptor, catalog.DatabaseDescriptor:
-		// If the restore is not a cluster restore we cannot know that the users on
-		// the restoring cluster match the ones that were on the cluster that was
-		// backed up. So we wipe the privileges on the type/database.
-		updatedPrivileges = descpb.NewDefaultPrivilegeDescriptor(user)
+		if descCoverage == tree.RequestedDescriptors {
+			// If the restore is not a cluster restore we cannot know that the users on
+			// the restoring cluster match the ones that were on the cluster that was
+			// backed up. So we wipe the privileges on the type/database.
+			updatedPrivileges = descpb.NewDefaultPrivilegeDescriptor(user)
+		}
 	}
 	return updatedPrivileges, nil
 }
diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go
index ce73f7877264..8cff9a4e0e8a 100644
--- a/pkg/ccl/backupccl/restore_planning.go
+++ b/pkg/ccl/backupccl/restore_planning.go
@@ -51,7 +51,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
-	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
 	"github.com/cockroachdb/cockroach/pkg/util/tracing"
 	"github.com/cockroachdb/errors"
 	"github.com/lib/pq/oid"
@@ -393,17 +392,17 @@ func allocateDescriptorRewrites(
 		// DB.
 		descriptorRewrites[tempSysDBID] = &jobspb.RestoreDetails_DescriptorRewrite{ID: tempSysDBID}
 		for _, table := range tablesByID {
-			if table.GetParentID() == systemschema.SystemDB.ID {
+			if table.GetParentID() == systemschema.SystemDB.GetID() {
 				descriptorRewrites[table.GetID()] = &jobspb.RestoreDetails_DescriptorRewrite{ParentID: tempSysDBID}
 			}
 		}
 		for _, sc := range typesByID {
-			if sc.GetParentID() == systemschema.SystemDB.ID {
+			if sc.GetParentID() == systemschema.SystemDB.GetID() {
 				descriptorRewrites[sc.GetID()] = &jobspb.RestoreDetails_DescriptorRewrite{ParentID: tempSysDBID}
 			}
 		}
 		for _, typ := range typesByID {
-			if typ.GetParentID() == systemschema.SystemDB.ID {
+			if typ.GetParentID() == systemschema.SystemDB.GetID() {
 				descriptorRewrites[typ.GetID()] = &jobspb.RestoreDetails_DescriptorRewrite{ParentID: tempSysDBID}
 			}
 		}
@@ -575,7 +574,9 @@
 			}
 
 			// Check that the table name is _not_ in use.
 			// This would fail the CPut later anyway, but this yields a prettier error.
-			if err := CheckObjectExists(ctx, txn, p.ExecCfg().Codec, parentID, table.GetParentSchemaID(), table.Name); err != nil {
+			tableName := tree.NewUnqualifiedTableName(tree.Name(table.GetName()))
+			err := catalogkv.CheckObjectCollision(ctx, txn, p.ExecCfg().Codec, parentID, table.GetParentSchemaID(), tableName)
+			if err != nil {
 				return err
 			}
@@ -649,11 +650,18 @@
 			}
 
 			// See if there is an existing type with the same name.
-			found, id, err := catalogkv.LookupObjectID(ctx, txn, p.ExecCfg().Codec, parentID, typ.GetParentSchemaID(), typ.Name)
+			desc, err := catalogkv.GetDescriptorCollidingWithObject(
+				ctx,
+				txn,
+				p.ExecCfg().Codec,
+				parentID,
+				typ.GetParentSchemaID(),
+				typ.Name,
+			)
 			if err != nil {
 				return err
 			}
-			if !found {
+			if desc == nil {
 				// If we didn't find a type with the same name, then mark that we
 				// need to create the type.
@@ -667,7 +675,9 @@
 				}
 				// Ensure that there isn't a collision with the array type name.
 				arrTyp := typesByID[typ.ArrayTypeID]
-				if err := CheckObjectExists(ctx, txn, p.ExecCfg().Codec, parentID, typ.GetParentSchemaID(), arrTyp.Name); err != nil {
+				typeName := tree.NewUnqualifiedTypeName(tree.Name(arrTyp.GetName()))
+				err := catalogkv.CheckObjectCollision(ctx, txn, p.ExecCfg().Codec, parentID, typ.GetParentSchemaID(), typeName)
+				if err != nil {
 					return errors.Wrapf(err, "name collision for %q's array type", typ.Name)
 				}
 				// Create the rewrite entry for the array type as well.
@@ -676,11 +686,6 @@
 			} else {
 				// If there was a name collision, we'll try to see if we can remap
 				// this type to the type existing in the cluster.
-				// See what kind of object we collided with.
-				desc, err := catalogkv.GetAnyDescriptorByID(ctx, txn, p.ExecCfg().Codec, id, catalogkv.Immutable)
-				if err != nil {
-					return err
-				}
 				// If the collided object isn't a type, then error out.
 				existingType, isType := desc.(*typedesc.Immutable)
 				if !isType {
@@ -873,9 +878,8 @@ func maybeUpgradeTableDescsInBackupManifests(
 	// descriptors so that they can be looked up.
 	for _, backupManifest := range backupManifests {
 		for _, desc := range backupManifest.Descriptors {
-			if table := descpb.TableFromDescriptor(&desc, hlc.Timestamp{}); table != nil {
-				descGetter[table.ID] =
-					tabledesc.NewImmutable(*protoutil.Clone(table).(*descpb.TableDescriptor))
+			if table, _, _, _ := descpb.FromDescriptor(&desc); table != nil {
+				descGetter[table.ID] = tabledesc.NewBuilder(table).BuildImmutable()
 			}
 		}
 	}
@@ -883,18 +887,19 @@
 	for i := range backupManifests {
 		backupManifest := &backupManifests[i]
 		for j := range backupManifest.Descriptors {
-			table := descpb.TableFromDescriptor(&backupManifest.Descriptors[j], hlc.Timestamp{})
+			table, _, _, _ := descpb.FromDescriptor(&backupManifest.Descriptors[j])
 			if table == nil {
 				continue
 			}
 			if !tabledesc.TableHasDeprecatedForeignKeyRepresentation(table) {
 				continue
 			}
-			desc, err := tabledesc.NewFilledInExistingMutable(ctx, descGetter, skipFKsWithNoMatchingTable, table)
+			b := tabledesc.NewBuilderForFKUpgrade(table, skipFKsWithNoMatchingTable)
+			err := b.RunPostDeserializationChanges(ctx, descGetter)
 			if err != nil {
 				return err
 			}
-			backupManifest.Descriptors[j] = *desc.DescriptorProto()
+			backupManifest.Descriptors[j] = *b.BuildExistingMutable().DescriptorProto()
 		}
 	}
 	return nil
diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go
index bc4f99b8a27a..4a8d571888cd 100644
--- a/pkg/ccl/backupccl/show.go
+++ b/pkg/ccl/backupccl/show.go
@@ -34,7 +34,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
 	"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
 	"github.com/cockroachdb/cockroach/pkg/util/encoding"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/tracing"
@@ -266,16 +265,14 @@ func backupShowerDefault(
 			schemaIDToName := make(map[descpb.ID]string)
 			schemaIDToName[keys.PublicSchemaID] = sessiondata.PublicSchemaName
 			for i := range manifest.Descriptors {
-				descriptor := &manifest.Descriptors[i]
-				if descriptor.GetDatabase() != nil {
-					id := descpb.GetDescriptorID(descriptor)
-					if _, ok := dbIDToName[id]; !ok {
-						dbIDToName[id] = descpb.GetDescriptorName(descriptor)
+				_, db, _, schema := descpb.FromDescriptor(&manifest.Descriptors[i])
+				if db != nil {
+					if _, ok := dbIDToName[db.ID]; !ok {
+						dbIDToName[db.ID] = db.Name
 					}
-				} else if descriptor.GetSchema() != nil {
-					id := descpb.GetDescriptorID(descriptor)
-					if _, ok := schemaIDToName[id]; !ok {
-						schemaIDToName[id] = descpb.GetDescriptorName(descriptor)
+				} else if schema != nil {
+					if _, ok := schemaIDToName[schema.ID]; !ok {
+						schemaIDToName[schema.ID] = schema.Name
 					}
 				}
 			}
@@ -317,7 +314,7 @@
 				dataSizeDatum := tree.DNull
 				rowCountDatum := tree.DNull
-				desc := catalogkv.UnwrapDescriptorRaw(ctx, descriptor)
+				desc := catalogkv.NewBuilder(descriptor).BuildExistingMutable()
 				descriptorName := desc.GetName()
 
 				switch desc := desc.(type) {
@@ -343,7 +340,7 @@
 						IgnoreComments: true,
 					}
 					createStmt, err := p.ShowCreate(ctx, dbName, manifest.Descriptors,
-						tabledesc.NewImmutable(*desc.TableDesc()), displayOptions)
+						tabledesc.NewBuilder(desc.TableDesc()).BuildImmutableTable(), displayOptions)
 					if err != nil {
 						// We expect that we might get an error here due to X-DB
 						// references, which were possible on 20.2 betas and rcs.
@@ -411,21 +408,24 @@ func nullIfEmpty(s string) tree.Datum {
 
 func showPrivileges(descriptor *descpb.Descriptor) string {
 	var privStringBuilder strings.Builder
 
-	var privDesc *descpb.PrivilegeDescriptor
+	b := catalogkv.NewBuilder(descriptor)
+	if b == nil {
+		return ""
+	}
 	var objectType privilege.ObjectType
-	if db := descriptor.GetDatabase(); db != nil {
-		privDesc = db.GetPrivileges()
+	switch b.DescriptorType() {
+	case catalog.Database:
 		objectType = privilege.Database
-	} else if typ := descriptor.GetType(); typ != nil {
-		privDesc = typ.GetPrivileges()
-		objectType = privilege.Type
-	} else if table := descpb.TableFromDescriptor(descriptor, hlc.Timestamp{}); table != nil {
-		privDesc = table.GetPrivileges()
+	case catalog.Table:
 		objectType = privilege.Table
-	} else if schema := descriptor.GetSchema(); schema != nil {
-		privDesc = schema.GetPrivileges()
+	case catalog.Type:
+		objectType = privilege.Type
+	case catalog.Schema:
 		objectType = privilege.Schema
+	default:
+		return ""
 	}
+	privDesc := b.BuildImmutable().GetPrivileges()
 	if privDesc == nil {
 		return ""
 	}
diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go
index ba420f54a407..8f18727f9bb9 100644
--- a/pkg/ccl/backupccl/targets.go
+++ b/pkg/ccl/backupccl/targets.go
@@ -25,7 +25,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
 	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
-	"github.com/cockroachdb/cockroach/pkg/sql/sqlerrors"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/errors"
 )
@@ -139,7 +138,7 @@ func getRelevantDescChanges(
 		if isInterestingID(change.ID) {
 			interestingChanges = append(interestingChanges, change)
 		} else if change.Desc != nil {
-			desc := catalogkv.UnwrapDescriptorRaw(ctx, change.Desc)
+			desc := catalogkv.NewBuilder(change.Desc).BuildExistingMutable()
 			switch desc := desc.(type) {
 			case catalog.TableDescriptor, catalog.TypeDescriptor, catalog.SchemaDescriptor:
 				if _, ok := interestingParents[desc.GetParentID()]; ok {
@@ -189,18 +188,16 @@ func getAllDescChanges(
 			if err := rev.GetProto(&desc); err != nil {
 				return nil, err
 			}
+			r.Desc = &desc
+			// Collect the prior IDs of table descriptors, as the ID may have been
+			// changed during truncate prior to 20.2.
 			// We update the modification time for the descriptors here with the
 			// timestamp of the KV row so that we can identify the appropriate
 			// descriptors to use during restore.
 			// Note that the modification time of descriptors on disk is usually 0.
 			// See the comment on MaybeSetDescriptorModificationTime... for more.
-			descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, &desc, rev.Timestamp)
-
-			// Collect the prior IDs of table descriptors, as the ID may have been
-			// changed during truncate.
-			r.Desc = &desc
-			t := descpb.TableFromDescriptor(&desc, rev.Timestamp)
+			t, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(r.Desc, rev.Timestamp)
 			if t != nil && t.ReplacementOf.ID != descpb.InvalidID {
 				priorIDs[t.ID] = t.ReplacementOf.ID
 			}
@@ -312,31 +309,6 @@
 	return id, nil
 }
 
-// CheckObjectExists returns an error if an object already exists with a given
-// parent, parent schema and name.
-func CheckObjectExists(
-	ctx context.Context,
-	txn *kv.Txn,
-	codec keys.SQLCodec,
-	parentID descpb.ID,
-	parentSchemaID descpb.ID,
-	name string,
-) error {
-	found, id, err := catalogkv.LookupObjectID(ctx, txn, codec, parentID, parentSchemaID, name)
-	if err != nil {
-		return err
-	}
-	if found {
-		// Find what object we collided with.
-		desc, err := catalogkv.GetAnyDescriptorByID(ctx, txn, codec, id, catalogkv.Immutable)
-		if err != nil {
-			return sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err)
-		}
-		return sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), name)
-	}
-	return nil
-}
-
 func fullClusterTargetsRestore(
 	allDescs []catalog.Descriptor, lastBackupManifest BackupManifest,
 ) ([]catalog.Descriptor, []catalog.DatabaseDescriptor, []descpb.TenantInfo, error) {
diff --git a/pkg/ccl/changefeedccl/avro_test.go b/pkg/ccl/changefeedccl/avro_test.go
index d6ca21414bc0..a82a1eb5e1e7 100644
--- a/pkg/ccl/changefeedccl/avro_test.go
+++ b/pkg/ccl/changefeedccl/avro_test.go
@@ -123,7 +123,7 @@ func parseAvroSchema(j string) (*avroDataRecord, error) {
 		}
 		tableDesc.Columns = append(tableDesc.Columns, *colDesc)
 	}
-	return tableToAvroSchema(tabledesc.NewImmutable(tableDesc), avroSchemaNoSuffix)
+	return tableToAvroSchema(tabledesc.NewBuilder(&tableDesc).BuildImmutableTable(), avroSchemaNoSuffix)
 }
 
 func avroFieldMetadataToColDesc(metadata string) (*descpb.ColumnDescriptor, error) {
diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go
index cc7bd9572f6f..59cb002c3096 100644
--- a/pkg/ccl/changefeedccl/changefeed_test.go
+++ b/pkg/ccl/changefeedccl/changefeed_test.go
@@ -973,7 +973,7 @@ func fetchDescVersionModificationTime(
 		if err := value.GetProto(&desc); err != nil {
 			t.Fatal(err)
 		}
-		if tableDesc := descpb.TableFromDescriptor(&desc, k.Timestamp); tableDesc != nil {
+		if tableDesc, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, k.Timestamp); tableDesc != nil {
 			if int(tableDesc.Version) == version {
 				return tableDesc.ModificationTime
 			}
diff --git a/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel b/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel
index ff6eda21e3ff..37c634517268 100644
--- a/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel
+++ b/pkg/ccl/changefeedccl/schemafeed/BUILD.bazel
@@ -19,7 +19,6 @@ go_library(
         "//pkg/sql/catalog/catalogkv",
         "//pkg/sql/catalog/descpb",
         "//pkg/sql/catalog/lease",
-        "//pkg/sql/catalog/tabledesc",
        "//pkg/sql/catalog/typedesc",
         "//pkg/storage",
         "//pkg/util/encoding",
diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
index 9db60623f9e0..ac96a71bc481 100644
--- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
+++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go
@@ -24,7 +24,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
 	"github.com/cockroachdb/cockroach/pkg/storage"
 	"github.com/cockroachdb/cockroach/pkg/util/encoding"
@@ -621,10 +620,9 @@ func (tf *SchemaFeed) fetchDescriptorVersions(
 				return err
 			}
 
-			if tableDesc := descpb.TableFromDescriptor(&desc, k.Timestamp); tableDesc != nil {
-				descs = append(descs, tabledesc.NewImmutable(*tableDesc))
-			} else if typeDesc := descpb.TypeFromDescriptor(&desc, k.Timestamp); typeDesc != nil {
-				descs = append(descs, typedesc.NewImmutable(*typeDesc))
+			b := catalogkv.NewBuilderWithMVCCTimestamp(&desc, k.Timestamp)
+			if b != nil && (b.DescriptorType() == catalog.Table || b.DescriptorType() == catalog.Type) {
+				descs = append(descs, b.BuildImmutable())
 			}
 		}
 	}(); err != nil {
diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed_test.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed_test.go
index 7ffba02147cc..ee28662ac729 100644
--- a/pkg/ccl/changefeedccl/schemafeed/schema_feed_test.go
+++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed_test.go
@@ -69,7 +69,7 @@ func TestTableHistoryIngestionTracking(t *testing.T) {
 
 	// validates
 	require.NoError(t, m.ingestDescriptors(ctx, ts(3), ts(4), []catalog.Descriptor{
-		tabledesc.NewImmutable(descpb.TableDescriptor{ID: 0}),
+		tabledesc.NewBuilder(&descpb.TableDescriptor{ID: 0}).BuildImmutable(),
 	}, validateFn))
 	require.Equal(t, ts(4), m.highWater())
 
@@ -109,7 +109,7 @@ func TestTableHistoryIngestionTracking(t *testing.T) {
 
 	// does not validate, high-water does not change
 	require.EqualError(t, m.ingestDescriptors(ctx, ts(7), ts(10), []catalog.Descriptor{
-		tabledesc.NewImmutable(descpb.TableDescriptor{ID: 0, Name: `whoops!`}),
+		tabledesc.NewBuilder(&descpb.TableDescriptor{ID: 0, Name: `whoops!`}).BuildImmutable(),
 	}, validateFn), `descriptor: whoops!`)
 	require.Equal(t, ts(7), m.highWater())
 
@@ -126,7 +126,7 @@ func TestTableHistoryIngestionTracking(t *testing.T) {
 	// turns out ts 10 is not a tight bound. ts 9 also has an error
 	require.EqualError(t, m.ingestDescriptors(ctx, ts(7), ts(9), []catalog.Descriptor{
-		tabledesc.NewImmutable(descpb.TableDescriptor{ID: 0, Name: `oh no!`}),
+		tabledesc.NewBuilder(&descpb.TableDescriptor{ID: 0, Name: `oh no!`}).BuildImmutable(),
 	}, validateFn), `descriptor: oh no!`)
 	require.Equal(t, ts(7), m.highWater())
 	require.EqualError(t, <-errCh9, `descriptor: oh no!`)
diff --git a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go
index 8f91952fe4ad..aedb49eaf12b 100644
--- a/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go
+++ b/pkg/ccl/changefeedccl/schemafeed/schematestutils/schema_test_utils.go
@@ -36,7 +36,7 @@ func MakeTableDesc(
 		td.Columns = append(td.Columns, *MakeColumnDesc(td.NextColumnID))
 		td.NextColumnID++
 	}
-	return tabledesc.NewImmutable(td)
+	return tabledesc.NewBuilder(&td).BuildImmutableTable()
 }
 
 // MakeColumnDesc makes a generic column descriptor with the provided id.
diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
index b02c1a615ed2..cdf96deea4b8 100644
--- a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
+++ b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go
@@ -39,6 +39,11 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func makeTopic(name string) tableDescriptorTopic {
+	desc := tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()
+	return tableDescriptorTopic{desc}
+}
+
 func TestCloudStorageSink(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
@@ -111,7 +116,7 @@ func TestCloudStorageSink(t *testing.T) {
 	user := security.RootUserName()
 
 	t.Run(`golden`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
 		timestampOracle := &changeAggregatorLowerBoundOracle{sf: sf}
@@ -146,8 +151,8 @@ func TestCloudStorageSink(t *testing.T) {
 	for _, compression := range []string{"", "gzip"} {
 		opts[changefeedbase.OptCompression] = compression
 		t.Run("compress="+compression, func(t *testing.T) {
-			t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
-			t2 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t2`})}
+			t1 := makeTopic(`t1`)
+			t2 := makeTopic(`t2`)
 
 			testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 			sf := span.MakeFrontier(testSpan)
@@ -222,7 +227,7 @@ func TestCloudStorageSink(t *testing.T) {
 	})
 
 	t.Run(`multi-node`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
@@ -303,7 +308,7 @@ func TestCloudStorageSink(t *testing.T) {
 	// This test is also sufficient for verifying the behavior of a multi-node
 	// changefeed using this sink. Ditto job restarts.
 	t.Run(`zombie`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
 		timestampOracle := &changeAggregatorLowerBoundOracle{sf: sf}
@@ -344,7 +349,7 @@ func TestCloudStorageSink(t *testing.T) {
 	})
 
 	t.Run(`bucketing`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
 		timestampOracle := &changeAggregatorLowerBoundOracle{sf: sf}
@@ -432,7 +437,7 @@ func TestCloudStorageSink(t *testing.T) {
 	})
 
 	t.Run(`file-ordering`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
 		timestampOracle := &changeAggregatorLowerBoundOracle{sf: sf}
@@ -491,7 +496,7 @@ func TestCloudStorageSink(t *testing.T) {
 	})
 
 	t.Run(`ordering-among-schema-versions`, func(t *testing.T) {
-		t1 := tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: `t1`})}
+		t1 := makeTopic(`t1`)
 		testSpan := roachpb.Span{Key: []byte("a"), EndKey: []byte("b")}
 		sf := span.MakeFrontier(testSpan)
 		timestampOracle := &changeAggregatorLowerBoundOracle{sf: sf}
diff --git a/pkg/ccl/changefeedccl/sink_test.go b/pkg/ccl/changefeedccl/sink_test.go
index 4f5e148cb5b8..a6180dcbec40 100644
--- a/pkg/ccl/changefeedccl/sink_test.go
+++ b/pkg/ccl/changefeedccl/sink_test.go
@@ -51,7 +51,7 @@ func (p asyncProducerMock) Close() error {
 }
 
 func topic(name string) tableDescriptorTopic {
-	return tableDescriptorTopic{tabledesc.NewImmutable(descpb.TableDescriptor{Name: name})}
+	return tableDescriptorTopic{tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name}).BuildImmutableTable()}
 }
 
 func TestKafkaSink(t *testing.T) {
@@ -186,7 +186,7 @@ func TestSQLSink(t *testing.T) {
 	topic := func(name string) tableDescriptorTopic {
 		id, _ := strconv.ParseUint(name, 36, 64)
 		return tableDescriptorTopic{
-			tabledesc.NewImmutable(descpb.TableDescriptor{Name: name, ID: descpb.ID(id)})}
+			tabledesc.NewBuilder(&descpb.TableDescriptor{Name: name, ID: descpb.ID(id)}).BuildImmutableTable()}
 	}
 
 	ctx := context.Background()
diff --git a/pkg/ccl/cliccl/BUILD.bazel b/pkg/ccl/cliccl/BUILD.bazel
index 7c248c0b4c4f..f29606009627 100644
--- a/pkg/ccl/cliccl/BUILD.bazel
+++ b/pkg/ccl/cliccl/BUILD.bazel
@@ -30,7 +30,6 @@ go_library(
         "//pkg/storage/cloudimpl",
         "//pkg/storage/enginepb",
         "//pkg/util/envutil",
-        "//pkg/util/hlc",
         "//pkg/util/humanizeutil",
         "//pkg/util/log",
         "//pkg/util/protoutil",
diff --git a/pkg/ccl/cliccl/load.go b/pkg/ccl/cliccl/load.go
index 7b679634a9c6..52a1e2809b82 100644
--- a/pkg/ccl/cliccl/load.go
+++ b/pkg/ccl/cliccl/load.go
@@ -23,7 +23,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
 	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
 	"github.com/cockroachdb/cockroach/pkg/storage/cloudimpl"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
 	"github.com/cockroachdb/errors"
@@ -104,14 +103,17 @@ func runLoadShow(cmd *cobra.Command, args []string) error {
 	fmt.Printf("Descriptors:\n")
 	for i := range desc.Descriptors {
 		d := &desc.Descriptors[i]
-		if desc := descpb.TableFromDescriptor(d, hlc.Timestamp{}); desc != nil {
-			fmt.Printf("	%d: %s (table)\n",
-				descpb.GetDescriptorID(d), descpb.GetDescriptorName(d))
-		}
-		if desc := d.GetDatabase(); desc != nil {
-			fmt.Printf("	%d: %s (database)\n",
-				descpb.GetDescriptorID(d), descpb.GetDescriptorName(d))
+		table, database, _, _ := descpb.FromDescriptor(d)
+		var typeName string
+		if table != nil {
+			typeName = "table"
+		} else if database != nil {
+			typeName = "database"
+		} else {
+			continue
 		}
+		fmt.Printf("	%d: %s (%s)\n",
+			descpb.GetDescriptorID(d), descpb.GetDescriptorName(d), typeName)
 	}
 	return nil
 }
diff --git a/pkg/ccl/importccl/import_processor.go b/pkg/ccl/importccl/import_processor.go
index 3bdffef93d76..9c38cdc7bbb0 100644
--- a/pkg/ccl/importccl/import_processor.go
+++ b/pkg/ccl/importccl/import_processor.go
@@ -172,7 +172,7 @@ func makeInputConverter(
 	var singleTableTargetCols tree.NameList
 	if len(spec.Tables) == 1 {
 		for _, table := range spec.Tables {
-			singleTable = tabledesc.NewImmutable(*table.Desc)
+			singleTable = tabledesc.NewBuilder(table.Desc).BuildImmutableTable()
 			singleTableTargetCols = make(tree.NameList, len(table.TargetCols))
 			for i, colName := range table.TargetCols {
 				singleTableTargetCols[i] = tree.Name(colName)
diff --git a/pkg/ccl/importccl/import_processor_test.go b/pkg/ccl/importccl/import_processor_test.go
index 696b1c2158c0..bd6890810dc1 100644
--- a/pkg/ccl/importccl/import_processor_test.go
+++ b/pkg/ccl/importccl/import_processor_test.go
@@ -914,7 +914,7 @@ func newTestSpec(
 	switch format.Format {
 	case roachpb.IOFileFormat_CSV:
 		descr = descForTable(ctx, t,
-			"CREATE TABLE simple (i INT PRIMARY KEY, s text )", 10, 20, NoFKs)
+			"CREATE TABLE simple (i INT PRIMARY KEY, s text )", 100, 200, NoFKs)
 	case roachpb.IOFileFormat_Mysqldump,
 		roachpb.IOFileFormat_MysqlOutfile,
@@ -922,7 +922,7 @@ func newTestSpec(
 		roachpb.IOFileFormat_PgCopy,
 		roachpb.IOFileFormat_Avro:
 		descr = descForTable(ctx, t,
-			"CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea default null)", 10, 20, NoFKs)
+			"CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea default null)", 100, 200, NoFKs)
 	default:
 		t.Fatalf("Unsupported input format: %v", format)
 	}
diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go
index a5b6b0a2b836..dbb651e553be 100644
--- a/pkg/ccl/importccl/import_stmt.go
+++ b/pkg/ccl/importccl/import_stmt.go
@@ -1121,7 +1121,7 @@ func prepareNewTableDescsForIngestion(
 ) ([]*descpb.TableDescriptor, error) {
 	newMutableTableDescriptors := make([]*tabledesc.Mutable, len(importTables))
 	for i := range importTables {
-		newMutableTableDescriptors[i] = tabledesc.NewCreatedMutable(*importTables[i].Desc)
+		newMutableTableDescriptors[i] = tabledesc.NewBuilder(importTables[i].Desc).BuildCreatedMutableTable()
 	}
 
 	// Verification steps have passed, generate a new table ID if we're
@@ -1158,14 +1158,15 @@ func prepareNewTableDescsForIngestion(
 	// collisions with any importing tables.
 	for i := range newMutableTableDescriptors {
 		tbl := newMutableTableDescriptors[i]
-		if err := backupccl.CheckObjectExists(
+		err := catalogkv.CheckObjectCollision(
 			ctx,
 			txn,
 			p.ExecCfg().Codec,
 			tbl.GetParentID(),
 			tbl.GetParentSchemaID(),
-			tbl.GetName(),
-		); err != nil {
+			tree.NewUnqualifiedTableName(tree.Name(tbl.GetName())),
+		)
+		if err != nil {
 			return nil, err
 		}
 	}
@@ -1391,7 +1392,7 @@ func (r *importResumer) prepareSchemasForIngestion(
 	mutableSchemaDescs := make([]*schemadesc.Mutable, 0)
 	for _, desc := range details.Schemas {
 		schemaMetadata.oldSchemaIDToName[desc.Desc.GetID()] = desc.Desc.GetName()
-		newMutableSchemaDescriptor := schemadesc.NewCreatedMutable(*desc.Desc)
+		newMutableSchemaDescriptor := schemadesc.NewBuilder(desc.Desc).BuildCreatedMutable().(*schemadesc.Mutable)
 
 		// Verification steps have passed, generate a new schema ID. We do this
 		// last because we want to avoid calling GenerateUniqueDescID if there's
@@ -1943,7 +1944,8 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error {
 	// case we can cheaply clear-range instead of revert-range to cleanup.
 	for i := range details.Tables {
 		if !details.Tables[i].IsNew {
-			tblSpan := tabledesc.NewImmutable(*details.Tables[i].Desc).TableSpan(p.ExecCfg().Codec)
+			tblDesc := tabledesc.NewBuilder(details.Tables[i].Desc).BuildImmutableTable()
+			tblSpan := tblDesc.TableSpan(p.ExecCfg().Codec)
 			res, err := p.ExecCfg().DB.Scan(ctx, tblSpan.Key, tblSpan.EndKey, 1 /* maxRows */)
 			if err != nil {
 				return errors.Wrap(err, "checking if existing table is empty")
diff --git a/pkg/ccl/importccl/read_import_avro_test.go b/pkg/ccl/importccl/read_import_avro_test.go
index 0e747781d2d9..11d053217bfe 100644
--- a/pkg/ccl/importccl/read_import_avro_test.go
+++ b/pkg/ccl/importccl/read_import_avro_test.go
@@ -199,7 +199,7 @@ func newTestHelper(ctx context.Context, t *testing.T, gens ...avroGen) *testHelp
 
 	return &testHelper{
 		schemaJSON: string(schemaJSON),
-		schemaTable: descForTable(ctx, t, createStmt, 10, 20, NoFKs).
+		schemaTable: descForTable(ctx, t, createStmt, 100, 200, NoFKs).
 			ImmutableCopy().(catalog.TableDescriptor),
 		codec: codec,
 		gens:  gens,
diff --git a/pkg/ccl/importccl/read_import_base_test.go b/pkg/ccl/importccl/read_import_base_test.go
index 0b3c5022d888..0bfffb92ff3f 100644
--- a/pkg/ccl/importccl/read_import_base_test.go
+++ b/pkg/ccl/importccl/read_import_base_test.go
@@ -135,7 +135,7 @@ func TestParallelImportProducerHandlesConsumerErrors(t *testing.T) {
 		numWorkers: 1,
 		batchSize:  2,
 		evalCtx:    testEvalCtx,
-		tableDesc:  tabledesc.NewImmutable(descr),
+		tableDesc:  tabledesc.NewBuilder(&descr).BuildImmutableTable(),
 		kvCh:       kvCh,
 	}
 
@@ -174,7 +174,7 @@ func TestParallelImportProducerHandlesCancellation(t *testing.T) {
 		numWorkers: 1,
 		batchSize:  2,
 		evalCtx:    testEvalCtx,
-		tableDesc:  tabledesc.NewImmutable(descr),
+		tableDesc:  tabledesc.NewBuilder(&descr).BuildImmutableTable(),
 		kvCh:       kvCh,
 	}
 
diff --git a/pkg/ccl/importccl/read_import_mysql.go b/pkg/ccl/importccl/read_import_mysql.go
index a945c4415da4..2f347edf6b42 100644
--- a/pkg/ccl/importccl/read_import_mysql.go
+++ b/pkg/ccl/importccl/read_import_mysql.go
@@ -73,7 +73,7 @@ func newMysqldumpReader(
 			converters[name] = nil
 			continue
 		}
-		conv, err := row.NewDatumRowConverter(ctx, tabledesc.NewImmutable(*table.Desc),
+		conv, err := row.NewDatumRowConverter(ctx, tabledesc.NewBuilder(table.Desc).BuildImmutableTable(),
 			nil /* targetColNames */, evalCtx, kvCh, nil /* seqChunkProvider */)
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/ccl/importccl/read_import_mysql_test.go b/pkg/ccl/importccl/read_import_mysql_test.go
index 85a2e4084784..c4ca5e209404 100644
--- a/pkg/ccl/importccl/read_import_mysql_test.go
+++ b/pkg/ccl/importccl/read_import_mysql_test.go
@@ -44,7 +44,7 @@ func TestMysqldumpDataReader(t *testing.T) {
 	files := getMysqldumpTestdata(t)
 
 	ctx := context.Background()
-	table := descForTable(ctx, t, `CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea)`, 10, 20, NoFKs)
+	table := descForTable(ctx, t, `CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea)`, 100, 200, NoFKs)
 	tables := map[string]*execinfrapb.ReadImportDataSpec_ImportTable{"simple": {Desc: table.TableDesc()}}
 
 	opts := roachpb.MysqldumpOptions{}
@@ -221,8 +221,8 @@ func compareTables(t *testing.T, expected, got *descpb.TableDescriptor) {
 		ctx := context.Background()
 		semaCtx := tree.MakeSemaContext()
 		tableName := &descpb.AnonymousTable
-		expectedDesc := tabledesc.NewImmutable(*expected)
-		gotDesc := tabledesc.NewImmutable(*got)
+		expectedDesc := tabledesc.NewBuilder(expected).BuildImmutableTable()
+		gotDesc := tabledesc.NewBuilder(got).BuildImmutableTable()
 		e, err := catformat.IndexForDisplay(
 			ctx, expectedDesc, tableName, &expected.Indexes[i], "" /* partition */, "" /* interleave */, &semaCtx,
 		)
diff --git a/pkg/ccl/importccl/read_import_pgdump.go b/pkg/ccl/importccl/read_import_pgdump.go
index 20528c031400..dbf2ed50a747 100644
--- a/pkg/ccl/importccl/read_import_pgdump.go
+++ b/pkg/ccl/importccl/read_import_pgdump.go
@@ -16,7 +16,6 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl"
 	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -751,13 +750,13 @@ func readPostgresStmt(
 			for _, name := range names {
 				tableName := name.ToUnresolvedObjectName().String()
 				if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
-					err := backupccl.CheckObjectExists(
+					err := catalogkv.CheckObjectCollision(
 						ctx,
 						txn,
 						p.ExecCfg().Codec,
 						parentID,
 						keys.PublicSchemaID,
-						tableName,
+						tree.NewUnqualifiedTableName(tree.Name(tableName)),
 					)
 					if err != nil {
 						return errors.Wrapf(err, `drop table "%s" and then retry the import`, tableName)
 					}
@@ -857,7 +856,7 @@ func newPgDumpReader(
 	colMap := make(map[*row.DatumRowConverter](map[string]int))
 	for name, table := range descs {
 		if table.Desc.IsTable() {
-			tableDesc := tabledesc.NewImmutable(*table.Desc)
+			tableDesc := tabledesc.NewBuilder(table.Desc).BuildImmutableTable()
 			colSubMap := make(map[string]int, len(table.TargetCols))
 			targetCols := make(tree.NameList, len(table.TargetCols))
 			for i, colName := range table.TargetCols {
@@ -875,7 +874,7 @@ func newPgDumpReader(
 			colMap[conv] = colSubMap
 			tableDescs[name] = tableDesc
 		} else if table.Desc.IsSequence() {
-			seqDesc := tabledesc.NewImmutable(*table.Desc)
+			seqDesc := tabledesc.NewBuilder(table.Desc).BuildImmutableTable()
 			tableDescs[name] = seqDesc
 		}
 	}
diff --git a/pkg/ccl/storageccl/key_rewriter.go b/pkg/ccl/storageccl/key_rewriter.go
index 5fb6ba0e0539..935b40c98a36 100644
--- a/pkg/ccl/storageccl/key_rewriter.go
+++ b/pkg/ccl/storageccl/key_rewriter.go
@@ -18,7 +18,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
 	"github.com/cockroachdb/cockroach/pkg/util/encoding"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
 	"github.com/cockroachdb/errors"
 )
@@ -93,11 +92,11 @@ func MakeKeyRewriterFromRekeys(
 		if err := protoutil.Unmarshal(rekey.NewDesc, &desc); err != nil {
 			return nil, errors.Wrapf(err, "unmarshalling rekey descriptor for old table id %d", rekey.OldID)
 		}
-		table := descpb.TableFromDescriptor(&desc, hlc.Timestamp{})
+		table, _, _, _ := descpb.FromDescriptor(&desc)
 		if table == nil {
 			return nil, errors.New("expected a table descriptor")
 		}
-		descs[descpb.ID(rekey.OldID)] = tabledesc.NewImmutable(*table)
+		descs[descpb.ID(rekey.OldID)] = tabledesc.NewBuilder(table).BuildImmutableTable()
 	}
 	return makeKeyRewriter(codec, descs)
 }
diff --git a/pkg/ccl/storageccl/key_rewriter_test.go b/pkg/ccl/storageccl/key_rewriter_test.go
index e2351ad995c9..7ff460ad365b 100644
--- a/pkg/ccl/storageccl/key_rewriter_test.go
+++ b/pkg/ccl/storageccl/key_rewriter_test.go
@@ -62,7 +62,7 @@ func TestPrefixRewriter(t *testing.T) {
 func TestKeyRewriter(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 
-	desc := tabledesc.NewCreatedMutable(*systemschema.NamespaceTable.TableDesc())
+	desc := tabledesc.NewBuilder(systemschema.NamespaceTable.TableDesc()).BuildCreatedMutableTable()
 	oldID := desc.ID
 	newID := desc.ID + 1
 	desc.ID = newID
@@ -120,7 +120,7 @@ func TestKeyRewriter(t *testing.T) {
 	t.Run("multi", func(t *testing.T) {
 		desc.ID = oldID + 10
-		desc2 := tabledesc.NewCreatedMutable(desc.TableDescriptor)
+		desc2 := tabledesc.NewBuilder(&desc.TableDescriptor).BuildCreatedMutableTable()
 		desc2.ID += 10
 		newKr, err := MakeKeyRewriterFromRekeys(keys.SystemSQLCodec, []roachpb.ImportRequest_TableRekey{
 			{OldID: uint32(oldID), NewDesc: mustMarshalDesc(t, desc.TableDesc())},
@@ -195,9 +195,9 @@ func TestKeyRewriter(t *testing.T) {
 }
 
 func mustMarshalDesc(t *testing.T, tableDesc *descpb.TableDescriptor) []byte {
-	desc := tabledesc.NewImmutable(*tableDesc).DescriptorProto()
+	desc := tabledesc.NewBuilder(tableDesc).BuildImmutable().DescriptorProto()
 	// Set the timestamp to a non-zero value.
-	descpb.TableFromDescriptor(desc, hlc.Timestamp{WallTime: 1})
+	descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(desc, hlc.Timestamp{WallTime: 1})
 	bytes, err := protoutil.Marshal(desc)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/pkg/ccl/streamingccl/streamclient/BUILD.bazel b/pkg/ccl/streamingccl/streamclient/BUILD.bazel
index 0527c181c5c5..01cdbd0be174 100644
--- a/pkg/ccl/streamingccl/streamclient/BUILD.bazel
+++ b/pkg/ccl/streamingccl/streamclient/BUILD.bazel
@@ -13,6 +13,7 @@ go_library(
         "//pkg/ccl/streamingccl",
         "//pkg/keys",
         "//pkg/roachpb",
+        "//pkg/security",
         "//pkg/sql",
         "//pkg/sql/catalog/catalogkeys",
         "//pkg/sql/catalog/descpb",
diff --git a/pkg/ccl/streamingccl/streamclient/random_stream_client.go b/pkg/ccl/streamingccl/streamclient/random_stream_client.go
index 07200d56b680..389288986c65 100644
--- a/pkg/ccl/streamingccl/streamclient/random_stream_client.go
+++ b/pkg/ccl/streamingccl/streamclient/random_stream_client.go
@@ -19,6 +19,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/ccl/streamingccl"
 	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
+	"github.com/cockroachdb/cockroach/pkg/security"
 	"github.com/cockroachdb/cockroach/pkg/sql"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
@@ -241,7 +242,7 @@ func (m *randomStreamClient) getDescriptorAndNamespaceKVForTableID(
 		IngestionDatabaseID,
 		tableID,
 		fmt.Sprintf(RandomStreamSchemaPlaceholder, tableName),
-		&descpb.PrivilegeDescriptor{},
+		descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()),
 	)
 	if err != nil {
 		return nil, nil, err
diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel
index 318f825c6700..f7370bbe4fce 100644
--- a/pkg/cli/BUILD.bazel
+++ b/pkg/cli/BUILD.bazel
@@ -120,9 +120,10 @@ go_library(
         "//pkg/settings",
         "//pkg/settings/cluster",
         "//pkg/sql",
+        "//pkg/sql/catalog",
+        "//pkg/sql/catalog/catalogkv",
         "//pkg/sql/catalog/catconstants",
         "//pkg/sql/catalog/descpb",
-        "//pkg/sql/catalog/tabledesc",
         "//pkg/sql/doctor",
         "//pkg/sql/execinfrapb",
         "//pkg/sql/lex",
diff --git a/pkg/cli/debug.go b/pkg/cli/debug.go
index 7a1494f45619..409a05d237b4 100644
--- a/pkg/cli/debug.go
+++ b/pkg/cli/debug.go
@@ -41,8 +41,9 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/server"
 	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
 	"github.com/cockroachdb/cockroach/pkg/server/status/statuspb"
+	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
+	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
-	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
 	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
 	"github.com/cockroachdb/cockroach/pkg/sql/row"
 	"github.com/cockroachdb/cockroach/pkg/storage"
@@ -189,7 +190,11 @@ func runDebugKeys(cmd *cobra.Command, args []string) error {
 		if err := protoutil.Unmarshal(bytes, &desc); err != nil {
 			return err
 		}
-		table := tabledesc.NewImmutable(*descpb.TableFromDescriptor(&desc, hlc.Timestamp{}))
+		b := catalogkv.NewBuilder(&desc)
+		if b == nil || b.DescriptorType() != catalog.Table {
+			return errors.Newf("expected a table descriptor")
+		}
+		table := b.BuildImmutable().(catalog.TableDescriptor)
 
 		fn := func(kv storage.MVCCKeyValue) (string, error) {
 			var v roachpb.Value
diff --git a/pkg/cli/testdata/doctor/debugzip/system.descriptor.txt b/pkg/cli/testdata/doctor/debugzip/system.descriptor.txt index 27c0871d3b6c..890d71976706 ---
a/pkg/cli/testdata/doctor/debugzip/system.descriptor.txt +++ b/pkg/cli/testdata/doctor/debugzip/system.descriptor.txt @@ -21,7 +21,7 @@ id descriptor hex_descriptor 27 \012\231\005\012\021replication_stats\030\033 \001(\001:\000B#\012\007zone_id\020\001\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B&\012\012subzone_id\020\002\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B%\012\011report_id\020\003\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B(\012\014total_ranges\020\004\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B.\012\022unavailable_ranges\020\005\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B3\012\027under_replicated_ranges\020\006\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B2\012\026over_replicated_ranges\020\007\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000H\010R`\012\007primary\020\001\030\001"\007zone_id"\012subzone_id0\0010\002@\000@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\037\012\012\012\005admin\020\360\003\012\011\012\004root\020\360\003\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\001\216\001\012\007primary\020\000\032\007zone_id\032\012subzone_id\032\011report_id\032\014total_ranges\032\022unavailable_ranges\032\027under_replicated_ranges\032\026over_replicated_ranges \001 \002 \003 \004 \005 \006 \007(\000\270\001\002\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0a99050a117265706c69636174696f6e5f7374617473181b200128013a0042230a077a6f6e655f696410011a0c0801104018003000501460002000300068007000780042260a0a7375627a6f6e655f696410021a0c0801104018003000501460002000300068007000780042250a097265706f72745f696410031a0c0801104018003000501460002000300068007000780042280a0c746f74616c5f72616e67657310041a0c08011040180030005014600020003000680070007800422e0a12756e617661696c61626c655f72616e67657310051a0c0801104018003000501460002000300068007000780042330a17756e6465725f7265706c6963617465645f72616e67657310061a0c0801104018003000501460002000300068007000780042320a166f7665725f7265706c6963617465645f72616e67657310071a0c08011040180030005014600020003000680070007800480852600a077072696d6172791001180122077a6f6e655f6964220a7375627a6f6e655f696430013002400040004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1f0a0a0a0561646d696e10f0030a090a04726f6f7410f00312046e6f64651801800101880103980100b2018e010a077072696d61727910001a077a6f6e655f69641a0a7375627a6f6e655f69641a097265706f72745f69641a0c746f74616c5f72616e6765731a12756e617661696c61626c655f72616e6765731a17756e6465725f7265706c6963617465645f72616e6765731a166f7665725f7265706c6963617465645f72616e67657320012002200320042005200620072800b80102c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d 28 \012\241\002\012\014reports_meta\030\034 \001(\001:\000B\036\012\002id\020\001\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B&\012\011generated\020\002\032\015\010\011\020\000\030\0000\000P\240\011`\000 \0000\000h\000p\000x\000H\003RK\012\007primary\020\001\030\001"\002id0\001@\000J\020\010\000\020\000\032\000 
\000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\037\012\012\012\005admin\020\360\003\012\011\012\004root\020\360\003\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\001 \012\007primary\020\000\032\002id\032\011generated \001 \002(\000\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0aa1020a0c7265706f7274735f6d657461181c200128013a00421e0a02696410011a0c0801104018003000501460002000300068007000780042260a0967656e65726174656410021a0d080910001800300050a0096000200030006800700078004803524b0a077072696d6172791001180122026964300140004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1f0a0a0a0561646d696e10f0030a090a04726f6f7410f00312046e6f64651801800101880103980100b201200a077072696d61727910001a0269641a0967656e657261746564200120022800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d 30 \012\267\003\012\012namespace2\030\036 \001(\001:\000B$\012\010parentID\020\001\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B*\012\016parentSchemaID\020\002\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B \012\004name\020\003\032\014\010\007\020\000\030\0000\000P\031`\000 \0000\000h\000p\000x\000B\036\012\002id\020\004\032\014\010\001\020@\030\0000\000P\024`\000 \0010\000h\000p\000x\000H\005Ro\012\007primary\020\001\030\001"\010parentID"\016parentSchemaID"\004name0\0010\0020\003@\000@\000@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\035\012\011\012\005admin\0200\012\010\012\004root\0200\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\0013\012\007primary\020\000\032\010parentID\032\016parentSchemaID\032\004name \001 \002 \003(\000\262\001\024\012\010fam_4_id\020\004\032\002id \004(\004\270\001\005\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0ab7030a0a6e616d65737061636532181e200128013a0042240a08706172656e74494410011a0c08011040180030005014600020003000680070007800422a0a0e706172656e74536368656d61494410021a0c0801104018003000501460002000300068007000780042200a046e616d6510031a0c08071000180030005019600020003000680070007800421e0a02696410041a0c080110401800300050146000200130006800700078004805526f0a077072696d617279100118012208706172656e744944220e706172656e74536368656d61494422046e616d653001300230034000400040004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1d0a090a0561646d696e10300a080a04726f6f74103012046e6f64651801800101880103980100b201330a077072696d61727910001a08706172656e7449441a0e706172656e74536368656d6149441a046e616d652001200220032800b201140a0866616d5f345f696410041a02696420042804b80105c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d -31 \012\207\004\012\021protected_ts_meta\030\037 \001(\001:\000B+\012\011singleton\020\001\032\014\010\000\020\000\030\0000\000P\020`\000 \000*\004true0\000h\000p\000x\000B#\012\007version\020\002\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B'\012\013num_records\020\003\032\014\010\001\020@\030\0000\000P\024`\000 
\0000\000h\000p\000x\000B%\012\011num_spans\020\004\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B'\012\013total_bytes\020\005\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000H\006RR\012\007primary\020\001\030\001"\011singleton0\001@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\037\012\012\012\005admin\020\360\003\012\011\012\004root\020\360\003\022\004node\030\001\200\001\001\210\001\003\230\001\000\242\001$\012\011singleton\022\017check_singleton\030\000(\0010\0008\000\262\001P\012\007primary\020\000\032\011singleton\032\007version\032\013num_records\032\011num_spans\032\013total_bytes \001 \002 \003 \004 \005(\000\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0a87040a1170726f7465637465645f74735f6d657461181f200128013a00422b0a0973696e676c65746f6e10011a0c08001000180030005010600020002a0474727565300068007000780042230a0776657273696f6e10021a0c0801104018003000501460002000300068007000780042270a0b6e756d5f7265636f72647310031a0c0801104018003000501460002000300068007000780042250a096e756d5f7370616e7310041a0c0801104018003000501460002000300068007000780042270a0b746f74616c5f627974657310051a0c08011040180030005014600020003000680070007800480652520a077072696d61727910011801220973696e676c65746f6e300140004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1f0a0a0a0561646d696e10f0030a090a04726f6f7410f00312046e6f64651801800101880103980100a201240a0973696e676c65746f6e120f636865636b5f73696e676c65746f6e1800280130003800b201500a077072696d61727910001a0973696e676c65746f6e1a0776657273696f6e1a0b6e756d5f7265636f7264731a096e756d5f7370616e731a0b746f74616c5f6279746573200120022003200420052800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d +31 "\012\234\004\012\021protected_ts_meta\030\037 \001(\001:\000B.\012\011singleton\020\001\032\014\010\000\020\000\030\0000\000P\020`\000 \000*\004true0\000h\000p\000x\000\200\001\000B&\012\007version\020\002\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000\200\001\000B*\012\013num_records\020\003\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000\200\001\000B(\012\011num_spans\020\004\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000\200\001\000B*\012\013total_bytes\020\005\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000\200\001\000H\006RT\012\007primary\020\001\030\001""\011singleton0\001@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\004\010\000 \000\200\001\000\210\001\000\220\001\002\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\035\012\011\012\005admin\0200\012\010\012\004root\0200\022\004node\030\001\200\001\001\210\001\003\230\001\000\242\001$\012\011singleton\022\017check_singleton\030\000(\0010\0008\000\262\001P\012\007primary\020\000\032\011singleton\032\007version\032\013num_records\032\011num_spans\032\013total_bytes \001 \002 \003 \004 \005(\000\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035\310\002\000\340\002\000" 
0a9c040a1170726f7465637465645f74735f6d657461181f200128013a00422e0a0973696e676c65746f6e10011a0c08001000180030005010600020002a0474727565300068007000780080010042260a0776657273696f6e10021a0c08011040180030005014600020003000680070007800800100422a0a0b6e756d5f7265636f72647310031a0c0801104018003000501460002000300068007000780080010042280a096e756d5f7370616e7310041a0c08011040180030005014600020003000680070007800800100422a0a0b746f74616c5f627974657310051a0c08011040180030005014600020003000680070007800800100480652540a077072696d61727910011801220973696e676c65746f6e300140004a10080010001a00200028003000380040005a007a0408002000800100880100900102980100a20106080012001800a80100b20100ba010060026a1d0a090a0561646d696e10300a080a04726f6f74103012046e6f64651801800101880103980100a201240a0973696e676c65746f6e120f636865636b5f73696e676c65746f6e1800280130003800b201500a077072696d61727910001a0973696e676c65746f6e1a0776657273696f6e1a0b6e756d5f7265636f7264731a096e756d5f7370616e731a0b746f74616c5f6279746573200120022003200420052800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021dc80200e00200 32 \012\221\004\012\024protected_ts_records\030 \001(\001:\000B\037\012\002id\020\001\032\015\010\016\020\000\030\0000\000P\206\027`\000 \0000\000h\000p\000x\000B\037\012\002ts\020\002\032\015\010\003\020\000\030\0000\000P\244\015`\000 \0000\000h\000p\000x\000B%\012\011meta_type\020\003\032\014\010\007\020\000\030\0000\000P\031`\000 \0000\000h\000p\000x\000B \012\004meta\020\004\032\014\010\010\020\000\030\0000\000P\021`\000 \0010\000h\000p\000x\000B%\012\011num_spans\020\005\032\014\010\001\020@\030\0000\000P\024`\000 \0000\000h\000p\000x\000B!\012\005spans\020\006\032\014\010\010\020\000\030\0000\000P\021`\000 \0000\000h\000p\000x\000B+\012\010verified\020\007\032\014\010\000\020\000\030\0000\000P\020`\000 \000*\005false0\000h\000p\000x\000H\010RK\012\007primary\020\001\030\001"\002id0\001@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\035\012\011\012\005admin\0200\012\010\012\004root\0200\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\001P\012\007primary\020\000\032\002id\032\002ts\032\011meta_type\032\004meta\032\011num_spans\032\005spans\032\010verified \001 \002 \003 \004 \005 \006 \007(\000\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 
0a91040a1470726f7465637465645f74735f7265636f7264731820200128013a00421f0a02696410011a0d080e100018003000508617600020003000680070007800421f0a02747310021a0d080310001800300050a40d60002000300068007000780042250a096d6574615f7479706510031a0c0807100018003000501960002000300068007000780042200a046d65746110041a0c0808100018003000501160002001300068007000780042250a096e756d5f7370616e7310051a0c0801104018003000501460002000300068007000780042210a057370616e7310061a0c08081000180030005011600020003000680070007800422b0a08766572696669656410071a0c08001000180030005010600020002a0566616c736530006800700078004808524b0a077072696d6172791001180122026964300140004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1d0a090a0561646d696e10300a080a04726f6f74103012046e6f64651801800101880103980100b201500a077072696d61727910001a0269641a0274731a096d6574615f747970651a046d6574611a096e756d5f7370616e731a057370616e731a08766572696669656420012002200320042005200620072800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d 33 \012\344\002\012\014role_options\030! \001(\001:\000B$\012\010username\020\001\032\014\010\007\020\000\030\0000\000P\031`\000 \0000\000h\000p\000x\000B"\012\006option\020\002\032\014\010\007\020\000\030\0000\000P\031`\000 \0000\000h\000p\000x\000B!\012\005value\020\003\032\014\010\007\020\000\030\0000\000P\031`\000 \0010\000h\000p\000x\000H\004R]\012\007primary\020\001\030\001"\010username"\006option0\0010\002@\000@\000J\020\010\000\020\000\032\000 \000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\037\012\012\012\005admin\020\360\003\012\011\012\004root\020\360\003\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\001,\012\007primary\020\000\032\010username\032\006option\032\005value \001 \002 \003(\003\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0ae4020a0c726f6c655f6f7074696f6e731821200128013a0042240a08757365726e616d6510011a0c0807100018003000501960002000300068007000780042220a066f7074696f6e10021a0c0807100018003000501960002000300068007000780042210a0576616c756510031a0c080710001800300050196000200130006800700078004804525d0a077072696d617279100118012208757365726e616d6522066f7074696f6e30013002400040004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1f0a0a0a0561646d696e10f0030a090a04726f6f7410f00312046e6f64651801800101880103980100b2012c0a077072696d61727910001a08757365726e616d651a066f7074696f6e1a0576616c75652001200220032803b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d 34 \012\351\002\012\027statement_bundle_chunks\030" \001(\001:\000B.\012\002id\020\001\032\014\010\001\020@\030\0000\000P\024`\000 \000*\016unique_rowid()0\000h\000p\000x\000B'\012\013description\020\002\032\014\010\007\020\000\030\0000\000P\031`\000 \0010\000h\000p\000x\000B \012\004data\020\003\032\014\010\010\020\000\030\0000\000P\021`\000 \0000\000h\000p\000x\000H\004RK\012\007primary\020\001\030\001"\002id0\001@\000J\020\010\000\020\000\032\000 
\000(\0000\0008\000@\000Z\000z\002\010\000\200\001\000\210\001\000\220\001\001\230\001\000\242\001\006\010\000\022\000\030\000\250\001\000\262\001\000\272\001\000`\002j\037\012\012\012\005admin\020\360\003\012\011\012\004root\020\360\003\022\004node\030\001\200\001\001\210\001\003\230\001\000\262\001*\012\007primary\020\000\032\002id\032\013description\032\004data \001 \002 \003(\000\270\001\001\302\001\000\350\001\000\362\001\004\010\000\022\000\370\001\000\200\002\000\222\002\000\232\002\000\262\002\000\270\002\000\300\002\035 0ae9020a1773746174656d656e745f62756e646c655f6368756e6b731822200128013a00422e0a02696410011a0c08011040180030005014600020002a0e756e697175655f726f7769642829300068007000780042270a0b6465736372697074696f6e10021a0c0807100018003000501960002001300068007000780042200a046461746110031a0c080810001800300050116000200030006800700078004804524b0a077072696d6172791001180122026964300140004a10080010001a00200028003000380040005a007a020800800100880100900101980100a20106080012001800a80100b20100ba010060026a1f0a0a0a0561646d696e10f0030a090a04726f6f7410f00312046e6f64651801800101880103980100b2012a0a077072696d61727910001a0269641a0b6465736372697074696f6e1a04646174612001200220032800b80101c20100e80100f2010408001200f801008002009202009a0200b20200b80200c0021d diff --git a/pkg/config/system.go b/pkg/config/system.go index beda9ce72176..63e9c928c043 100644 --- a/pkg/config/system.go +++ b/pkg/config/system.go @@ -149,7 +149,7 @@ func (s *SystemConfig) getSystemTenantDesc(key roachpb.Key) *roachpb.Value { // configs through proper channels. // // Getting here outside tests is impossible. - desc := tabledesc.NewImmutable(descpb.TableDescriptor{}).DescriptorProto() + desc := tabledesc.NewBuilder(&descpb.TableDescriptor{}).BuildImmutable().DescriptorProto() var val roachpb.Value if err := val.SetProto(desc); err != nil { panic(err) diff --git a/pkg/config/system_test.go b/pkg/config/system_test.go index ff8d4c8c8f7a..f0f81cd2c8d2 100644 --- a/pkg/config/system_test.go +++ b/pkg/config/system_test.go @@ -68,7 +68,7 @@ func sqlKV(tableID uint32, indexID, descID uint64) roachpb.KeyValue { func descriptor(descID uint64) roachpb.KeyValue { id := descpb.ID(descID) k := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, id) - v := tabledesc.NewImmutable(descpb.TableDescriptor{ID: id}) + v := tabledesc.NewBuilder(&descpb.TableDescriptor{ID: id}).BuildImmutable() kv := roachpb.KeyValue{Key: k} if err := kv.Value.SetProto(v.DescriptorProto()); err != nil { panic(err) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 3cff9b9eef7a..3e8641b53049 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -1401,7 +1401,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // We don't care about the value, just the key. id := descpb.ID(i) key := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, id) - desc := tabledesc.NewImmutable(descpb.TableDescriptor{ID: id}) + desc := tabledesc.NewBuilder(&descpb.TableDescriptor{ID: id}).BuildImmutable() if err := txn.Put(ctx, key, desc.DescriptorProto()); err != nil { return err } @@ -1470,7 +1470,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // the descriptor we add. We don't care about the value, just the key. 
id := descpb.ID(userTableMax) k := catalogkeys.MakeDescMetadataKey(keys.SystemSQLCodec, id) - desc := tabledesc.NewImmutable(descpb.TableDescriptor{ID: id}) + desc := tabledesc.NewBuilder(&descpb.TableDescriptor{ID: id}).BuildImmutable() return txn.Put(ctx, k, desc.DescriptorProto()) }); err != nil { t.Fatal(err) diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index dd13a63f7315..10e122226acf 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -863,7 +863,7 @@ func generateTableZone(t table, tableDesc descpb.TableDescriptor) (*zonepb.ZoneC var err error tableZone.SubzoneSpans, err = sql.GenerateSubzoneSpans( nil, uuid.UUID{} /* clusterID */, keys.SystemSQLCodec, - tabledesc.NewImmutable(tableDesc), tableZone.Subzones, false /* hasNewSubzones */) + tabledesc.NewBuilder(&tableDesc).BuildImmutableTable(), tableZone.Subzones, false /* hasNewSubzones */) if err != nil { return nil, errors.Wrap(err, "error generating subzone spans") } @@ -1086,7 +1086,7 @@ func (b *systemConfigBuilder) addTableDesc(id int, tableDesc descpb.TableDescrip } // Use a bogus timestamp for the descriptor modification time. ts := hlc.Timestamp{WallTime: 123} - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(context.Background(), desc, ts) + descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(desc, ts) var v roachpb.Value if err := v.SetProto(desc); err != nil { panic(err) diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index dfa6499a3cba..be8abbd8c640 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -450,7 +450,7 @@ func visitAncestors( if err := descVal.GetProto(&desc); err != nil { return false, err } - tableDesc := descpb.TableFromDescriptor(&desc, descVal.Timestamp) + tableDesc, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, descVal.Timestamp) // If it's a database, the parent is the default zone. 
if tableDesc == nil { return visitDefaultZone(ctx, cfg, visitor), nil diff --git a/pkg/migration/migrations/foreign_key_representation_upgrade.go b/pkg/migration/migrations/foreign_key_representation_upgrade.go index 8b732e90c1cf..e930bd2ea344 100644 --- a/pkg/migration/migrations/foreign_key_representation_upgrade.go +++ b/pkg/migration/migrations/foreign_key_representation_upgrade.go @@ -88,7 +88,7 @@ SELECT id, descriptor, crdb_internal_mvcc_timestamp FROM system.descriptor WHERE return false, 0, errors.Wrapf(err, "failed to unmarshal descriptor with ID %d", id) } - t := descpb.TableFromDescriptor(&desc, ts) + t, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, ts) if t != nil && !t.Dropped() && tableNeedsFKUpgrade(t) { return false, id, nil } diff --git a/pkg/server/diagnostics/reporter.go b/pkg/server/diagnostics/reporter.go index a40d901d55d8..5ed6cf909d27 100644 --- a/pkg/server/diagnostics/reporter.go +++ b/pkg/server/diagnostics/reporter.go @@ -310,7 +310,8 @@ func (r *Reporter) collectSchemaInfo(ctx context.Context) ([]descpb.TableDescrip if err := kv.ValueProto(&desc); err != nil { return nil, errors.Wrapf(err, "%s: unable to unmarshal SQL descriptor", kv.Key) } - if t := descpb.TableFromDescriptor(&desc, kv.Value.Timestamp); t != nil && t.ID > keys.MaxReservedDescID { + t, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, kv.Value.Timestamp) + if t != nil && t.ID > keys.MaxReservedDescID { if err := reflectwalk.Walk(t, redactor); err != nil { panic(err) // stringRedactor never returns a non-nil err } diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index b95cc23606fa..02e8cfd1df0b 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -625,8 +625,8 @@ func TestSystemConfigGossip(t *testing.T) { return err } - expected := valAt(2).GetDatabase() - db := got.GetDatabase() + _, expected, _, _ := descpb.FromDescriptor(valAt(2)) + _, db, _, _ := descpb.FromDescriptor(&got) if db == nil { panic(errors.Errorf("found nil database: %v", got)) } diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index c4f5fec84cc8..026bbf829773 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -525,6 +525,15 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e ) } + // Get the type descriptor for the multi-region enum. + typeDesc, err := params.p.Descriptors().GetMutableTypeVersionByID( + params.ctx, + params.p.txn, + n.desc.RegionConfig.RegionEnumID) + if err != nil { + return err + } + // To update the primary region we need to modify the database descriptor, update the multi-region // enum, and write a new zone configuration. n.desc.RegionConfig.PrimaryRegion = descpb.RegionName(n.n.PrimaryRegion) @@ -536,15 +545,6 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e return err } - // Get the type descriptor for the multi-region enum. - typeDesc, err := params.p.Descriptors().GetMutableTypeVersionByID( - params.ctx, - params.p.txn, - n.desc.RegionConfig.RegionEnumID) - if err != nil { - return err - } - // Update the primary region in the type descriptor, and write it back out. 
typeDesc.RegionConfig.PrimaryRegion = descpb.RegionName(n.n.PrimaryRegion) if err := params.p.writeTypeDesc(params.ctx, typeDesc); err != nil { diff --git a/pkg/sql/alter_schema.go b/pkg/sql/alter_schema.go index fae3d9bb907c..d04889763a1a 100644 --- a/pkg/sql/alter_schema.go +++ b/pkg/sql/alter_schema.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" @@ -189,7 +190,7 @@ func (p *planner) renameSchema( return err } if found { - return pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", newName) + return sqlerrors.NewSchemaAlreadyExistsError(newName) } // Ensure that the new name is a valid schema name. diff --git a/pkg/sql/alter_table_set_schema.go b/pkg/sql/alter_table_set_schema.go index 37faff8dfa1d..e77a5cbc062a 100644 --- a/pkg/sql/alter_table_set_schema.go +++ b/pkg/sql/alter_table_set_schema.go @@ -80,7 +80,7 @@ func (p *planner) AlterTableSetSchema( for _, dependent := range tableDesc.DependedOnBy { if !dependent.ByID { return nil, p.dependentViewError( - ctx, tableDesc.TypeName(), tableDesc.Name, + ctx, string(tableDesc.DescriptorType()), tableDesc.Name, tableDesc.ParentID, dependent.ID, "set schema on", ) } diff --git a/pkg/sql/alter_type.go b/pkg/sql/alter_type.go index 98b3da69181a..cf13cafdae83 100644 --- a/pkg/sql/alter_type.go +++ b/pkg/sql/alter_type.go @@ -23,7 +23,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/errors" @@ -214,23 +213,15 @@ func (p *planner) dropEnumValue( } func (p *planner) renameType(ctx context.Context, n *alterTypeNode, newName string) error { - // See if there is a name collision with the new name. - exists, id, err := catalogkv.LookupObjectID( + err := catalogkv.CheckObjectCollision( ctx, p.txn, p.ExecCfg().Codec, n.desc.ParentID, n.desc.ParentSchemaID, - newName, + tree.NewUnqualifiedTypeName(tree.Name(newName)), ) - if err == nil && exists { - // Try and see what kind of object we collided with. - desc, err := catalogkv.GetAnyDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - return sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), newName) - } else if err != nil { + if err != nil { return err } diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go index bab3091ffe35..adc1805cfa84 100644 --- a/pkg/sql/authorization.go +++ b/pkg/sql/authorization.go @@ -130,7 +130,7 @@ func (p *planner) CheckPrivilegeForUser( } return pgerror.Newf(pgcode.InsufficientPrivilege, "user %s does not have %s privilege on %s %s", - user, privilege, descriptor.TypeName(), descriptor.GetName()) + user, privilege, descriptor.DescriptorType(), descriptor.GetName()) } // CheckPrivilege implements the AuthorizationAccessor interface. 
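The alter_type.go hunk above drops the hand-rolled LookupObjectID/GetAnyDescriptorByID collision check in favor of the new catalogkv.CheckObjectCollision helper introduced later in this patch. Below is a minimal sketch of that call pattern; the helper's signature is taken from the catalogkv changes in this diff, while the wrapper function and its arguments are purely illustrative.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)

// checkRenameTarget returns an error if an object named newName already exists
// under the given parent database and schema, mirroring the renameType hunk above.
func checkRenameTarget(
	ctx context.Context,
	txn *kv.Txn,
	codec keys.SQLCodec,
	parentID, parentSchemaID descpb.ID,
	newName string,
) error {
	// CheckObjectCollision wraps the namespace lookup, the descriptor fetch and
	// the "already exists" error construction that callers previously open-coded.
	return catalogkv.CheckObjectCollision(
		ctx, txn, codec, parentID, parentSchemaID,
		tree.NewUnqualifiedTypeName(tree.Name(newName)),
	)
}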
@@ -233,7 +233,7 @@ func (p *planner) CheckAnyPrivilege(ctx context.Context, descriptor catalog.Desc return pgerror.Newf(pgcode.InsufficientPrivilege, "user %s has no privileges on %s %s", - p.SessionData().User(), descriptor.TypeName(), descriptor.GetName()) + p.SessionData().User(), descriptor.DescriptorType(), descriptor.GetName()) } // UserHasAdminRole implements the AuthorizationAccessor interface. diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 2c01f52d2f30..3248d3108ac6 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -800,7 +800,11 @@ func TruncateInterleavedIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return RemoveIndexZoneConfigs(ctx, txn, execCfg, table.GetParentID(), indexes) + freshTableDesc, err := catalogkv.MustGetTableDescByID(ctx, txn, execCfg.Codec, table.GetID()) + if err != nil { + return err + } + return RemoveIndexZoneConfigs(ctx, txn, execCfg, freshTableDesc, indexes) }); err != nil { return err } @@ -876,7 +880,11 @@ func (sc *SchemaChanger) truncateIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return RemoveIndexZoneConfigs(ctx, txn, sc.execCfg, sc.descID, dropped) + table, err := catalogkv.MustGetTableDescByID(ctx, txn, sc.execCfg.Codec, sc.descID) + if err != nil { + return err + } + return RemoveIndexZoneConfigs(ctx, txn, sc.execCfg, table, dropped) }); err != nil { return err } @@ -1343,13 +1351,11 @@ func (sc *SchemaChanger) updateJobRunningStatus( ctx context.Context, status jobs.RunningStatus, ) (*tabledesc.Mutable, error) { var tableDesc *tabledesc.Mutable - err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := catalogkv.GetDescriptorByID(ctx, txn, sc.execCfg.Codec, sc.descID, catalogkv.Mutable, - catalogkv.TableDescriptorKind, true /* required */) + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + tableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, sc.execCfg.Codec, sc.descID) if err != nil { return err } - tableDesc = desc.(*tabledesc.Mutable) // Update running status of job. updateJobRunningProgress := false @@ -1859,7 +1865,7 @@ func runSchemaChangesInTxn( // mutations that need to be processed. for i := 0; i < len(tableDesc.Mutations); i++ { m := tableDesc.Mutations[i] - immutDesc := tabledesc.NewImmutable(*tableDesc.TableDesc()) + immutDesc := tabledesc.NewBuilder(tableDesc.TableDesc()).BuildImmutableTable() switch m.Direction { case descpb.DescriptorMutation_ADD: switch m.Descriptor_.(type) { @@ -2328,5 +2334,5 @@ func indexTruncateInTxn( } } // Remove index zone configs. 
- return RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc.GetID(), []descpb.IndexDescriptor{*idx}) + return RemoveIndexZoneConfigs(ctx, txn, execCfg, tableDesc, []descpb.IndexDescriptor{*idx}) } diff --git a/pkg/sql/catalog/catalogkv/BUILD.bazel b/pkg/sql/catalog/catalogkv/BUILD.bazel index 164654a420a6..de3d9880af1e 100644 --- a/pkg/sql/catalog/catalogkv/BUILD.bazel +++ b/pkg/sql/catalog/catalogkv/BUILD.bazel @@ -5,9 +5,9 @@ go_library( name = "catalogkv", srcs = [ "catalogkv.go", + "desc_getter.go", "namespace.go", "test_utils.go", - ":gen-descriptorkind-stringer", # keep ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv", visibility = ["//visibility:public"], @@ -41,6 +41,9 @@ go_test( data = glob(["testdata/**"]), embed = [":catalogkv"], deps = [ + "//pkg/keys", + "//pkg/kv", + "//pkg/roachpb", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/testutils", diff --git a/pkg/sql/catalog/catalogkv/catalogkv.go b/pkg/sql/catalog/catalogkv/catalogkv.go index 8ef4c2051d79..93715f4aab21 100644 --- a/pkg/sql/catalog/catalogkv/catalogkv.go +++ b/pkg/sql/catalog/catalogkv/catalogkv.go @@ -84,6 +84,33 @@ func ResolveSchemaID( return true, schemaID, nil } +// NewBuilderWithMVCCTimestamp takes a descriptor as deserialized from storage, +// along with its MVCC timestamp, and returns a catalog.DescriptorBuilder object. +// Returns nil if nothing specific is found in desc. +func NewBuilderWithMVCCTimestamp( + desc *descpb.Descriptor, mvccTimestamp hlc.Timestamp, +) catalog.DescriptorBuilder { + table, database, typ, schema := descpb.FromDescriptorWithMVCCTimestamp(desc, mvccTimestamp) + switch { + case table != nil: + return tabledesc.NewBuilder(table) + case database != nil: + return dbdesc.NewBuilder(database) + case typ != nil: + return typedesc.NewBuilder(typ) + case schema != nil: + return schemadesc.NewBuilder(schema) + default: + return nil + } +} + +// NewBuilder is a convenience function which calls NewBuilderWithMVCCTimestamp +// with an empty timestamp. +func NewBuilder(desc *descpb.Descriptor) catalog.DescriptorBuilder { + return NewBuilderWithMVCCTimestamp(desc, hlc.Timestamp{}) +} + // TODO(ajwerner): The below flags are suspiciously similar to the flags passed // to accessor methods. Furthermore we're pretty darn unhappy with the Accessor // API as it provides a handle to the transaction for bad reasons. @@ -92,129 +119,111 @@ func ResolveSchemaID( // lookup flags. It then should get lifted onto an interface that becomes an // argument into the accessor. -// Mutability indicates whether the desired descriptor is mutable. This type -// aids readability. -type Mutability bool +// mutability indicates whether the desired descriptor is mutable. +// This type aids readability. +type mutability bool -// Mutability values. +// mutability values. const ( - Immutable Mutability = false - Mutable Mutability = true + immutable mutability = false + mutable mutability = true ) -//go:generate stringer -type DescriptorKind catalogkv.go - -// DescriptorKind is used to indicate the desired kind of descriptor from -// GetDescriptorByID. -type DescriptorKind int +// required indicates whether the desired descriptor must be found. +// This type aids readability. +type required bool -// List of DescriptorKind values. +// required values. 
const ( - DatabaseDescriptorKind DescriptorKind = iota - SchemaDescriptorKind - TableDescriptorKind - TypeDescriptorKind - AnyDescriptorKind // permit any kind + bestEffort required = false + mustGet required = true ) -// GetAnyDescriptorByID is a wrapper around GetDescriptorByID which permits -// missing descriptors and does not restrict the requested kind. -func GetAnyDescriptorByID( - ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, mutable Mutability, -) (catalog.Descriptor, error) { - return GetDescriptorByID(ctx, txn, codec, id, mutable, AnyDescriptorKind, false /* required */) -} - -// GetDescriptorByID looks up the descriptor for `id`. The descriptor -// will be validated if the requested descriptor is Immutable. -// -// TODO(ajwerner): Fix this odd behavior with validation which is used to hack -// around the fact that mutable descriptors are sometimes looked up while they -// are being mutated and in that period may be invalid with respect to the -// state of other descriptors in the database. Instead we ought to inject a -// higher level interface than a `txn` here for looking up other descriptors -// during validation. Ideally we'd have a handle to the transaction's -// descs.Collection and we'd maintain that when writing or retrieving -// descriptors which have been mutated we wouldn't reach back into the kv store. -func GetDescriptorByID( +// descriptorFromKeyValue unmarshals, hydrates and validates a descriptor from +// a key-value storage entry . +func descriptorFromKeyValue( ctx context.Context, - txn *kv.Txn, codec keys.SQLCodec, - id descpb.ID, - mutable Mutability, - kind DescriptorKind, - required bool, + kv kv.KeyValue, + mutable mutability, + expectedType catalog.DescriptorType, + required required, + dg catalog.DescGetter, + validationLevel catalog.ValidationLevel, ) (catalog.Descriptor, error) { - log.Eventf(ctx, "fetching descriptor with ID %d", id) - descKey := catalogkeys.MakeDescMetadataKey(codec, id) - raw := &descpb.Descriptor{} - ts, err := txn.GetProtoTs(ctx, descKey, raw) + b, err := builderFromKeyValue(codec, kv, expectedType, required) + if err != nil || b == nil { + return nil, err + } + err = b.RunPostDeserializationChanges(ctx, dg) if err != nil { return nil, err } var desc catalog.Descriptor - dg := NewOneLevelUncachedDescGetter(txn, codec) if mutable { - desc, err = unwrapDescriptorMutable(ctx, dg, ts, raw) + desc = b.BuildExistingMutable() } else { - desc, err = unwrapDescriptor(ctx, dg, ts, raw, true /* validate */) + desc = b.BuildImmutable() } + err = catalog.Validate(ctx, dg, validationLevel, desc).CombinedError() if err != nil { return nil, err } - if desc == nil { - if required { - return nil, requiredError(kind, id) - } - return nil, nil - } - if err := desiredKindError(desc, kind, id); err != nil { - return nil, err - } return desc, nil } -func desiredKindError(desc catalog.Descriptor, kind DescriptorKind, id descpb.ID) error { - if kind == AnyDescriptorKind { - return nil +// builderFromKeyValue is a utility function for descriptorFromKeyValue which +// unmarshals the proto and checks that it exists and that it matches the +// expected descriptor subtype. It returns it wrapped in a DescriptorBuilder. 
+func builderFromKeyValue( + codec keys.SQLCodec, kv kv.KeyValue, expectedType catalog.DescriptorType, required required, +) (catalog.DescriptorBuilder, error) { + var descProto descpb.Descriptor + if err := kv.ValueProto(&descProto); err != nil { + return nil, err } - var kindMismatched bool - switch desc.(type) { - case catalog.DatabaseDescriptor: - kindMismatched = kind != DatabaseDescriptorKind - case catalog.SchemaDescriptor: - kindMismatched = kind != SchemaDescriptorKind - case catalog.TableDescriptor: - kindMismatched = kind != TableDescriptorKind - case catalog.TypeDescriptor: - kindMismatched = kind != TypeDescriptorKind - } - if !kindMismatched { - return nil + var ts hlc.Timestamp + if kv.Value != nil { + ts = kv.Value.Timestamp } - return pgerror.Newf(pgcode.WrongObjectType, - "%q with ID %d is not a %s", desc, log.Safe(id), kind.String()) + b := NewBuilderWithMVCCTimestamp(&descProto, ts) + if b == nil { + if required { + id, err := codec.DecodeDescMetadataID(kv.Key) + if err != nil { + return nil, err + } + return nil, requiredError(expectedType, descpb.ID(id)) + } + return nil, nil + } + if expectedType != catalog.Any && b.DescriptorType() != expectedType { + id, err := codec.DecodeDescMetadataID(kv.Key) + if err != nil { + return nil, err + } + return nil, pgerror.Newf(pgcode.WrongObjectType, + "descriptor with ID %d is not a %s, instead is a %s", id, expectedType, b.DescriptorType()) + } + return b, nil } // requiredError returns an appropriate error when a descriptor which was // required was not found. -// -// TODO(ajwerner): This code is rather upsetting and feels like it duplicates -// some of the logic in physical_accessor.go. -func requiredError(kind DescriptorKind, id descpb.ID) error { +func requiredError(expectedObjectType catalog.DescriptorType, id descpb.ID) error { var err error var wrapper func(descpb.ID, error) error - switch kind { - case TableDescriptorKind: + switch expectedObjectType { + case catalog.Table: err = sqlerrors.NewUndefinedRelationError(&tree.TableRef{TableID: int64(id)}) wrapper = catalog.WrapTableDescRefErr - case DatabaseDescriptorKind: + case catalog.Database: err = sqlerrors.NewUndefinedDatabaseError(fmt.Sprintf("[%d]", id)) wrapper = catalog.WrapDatabaseDescRefErr - case SchemaDescriptorKind: - err = sqlerrors.NewUnsupportedSchemaUsageError(fmt.Sprintf("[%d]", id)) + case catalog.Schema: + err = sqlerrors.NewUndefinedSchemaError(fmt.Sprintf("[%d]", id)) wrapper = catalog.WrapSchemaDescRefErr - case TypeDescriptorKind: + case catalog.Type: err = sqlerrors.NewUndefinedTypeError(tree.NewUnqualifiedTypeName(tree.Name(fmt.Sprintf("[%d]", id)))) wrapper = catalog.WrapTypeDescRefErr default: @@ -224,142 +233,6 @@ func requiredError(kind DescriptorKind, id descpb.ID) error { return errors.CombineErrors(wrapper(id, catalog.ErrDescriptorNotFound), err) } -// NewOneLevelUncachedDescGetter returns a new DescGetter backed by the passed -// Txn. It will use the transaction to resolve mutable descriptors using -// GetDescriptorByID but will pass a nil DescGetter into those lookup calls to -// ensure that the entire graph of dependencies is not traversed. 
-func NewOneLevelUncachedDescGetter(txn *kv.Txn, codec keys.SQLCodec) catalog.BatchDescGetter { - return &oneLevelUncachedDescGetter{ - txn: txn, - codec: codec, - } -} - -type oneLevelUncachedDescGetter struct { - codec keys.SQLCodec - txn *kv.Txn -} - -func (t *oneLevelUncachedDescGetter) GetDesc( - ctx context.Context, id descpb.ID, -) (catalog.Descriptor, error) { - descKey := catalogkeys.MakeDescMetadataKey(t.codec, id) - raw := &descpb.Descriptor{} - ts, err := t.txn.GetProtoTs(ctx, descKey, raw) - if err != nil { - return nil, err - } - // This mutable unwrapping with a nil desc-getter will avoid doing anything - // crazy. - return unwrapDescriptorMutable(ctx, nil, ts, raw) -} - -func (t *oneLevelUncachedDescGetter) GetDescs( - ctx context.Context, reqs []descpb.ID, -) ([]catalog.Descriptor, error) { - ba := t.txn.NewBatch() - for _, id := range reqs { - descKey := catalogkeys.MakeDescMetadataKey(t.codec, id) - ba.Get(descKey) - } - if err := t.txn.Run(ctx, ba); err != nil { - return nil, err - } - ret := make([]catalog.Descriptor, len(reqs)) - for i, res := range ba.Results { - var desc descpb.Descriptor - if err := res.Rows[0].ValueProto(&desc); err != nil { - return nil, err - } - if desc != (descpb.Descriptor{}) { - unwrapped, err := unwrapDescriptorMutable(ctx, nil, res.Rows[0].Value.Timestamp, &desc) - if err != nil { - return nil, err - } - ret[i] = unwrapped - } - - } - return ret, nil - -} - -var _ catalog.DescGetter = (*oneLevelUncachedDescGetter)(nil) - -// unwrapDescriptor takes a descriptor retrieved using a transaction and unwraps -// it into an immutable implementation of Descriptor. It ensures that -// the ModificationTime is set properly and will validate the descriptor if -// validate is true. -func unwrapDescriptor( - ctx context.Context, - dg catalog.DescGetter, - ts hlc.Timestamp, - desc *descpb.Descriptor, - validate bool, -) (catalog.Descriptor, error) { - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, desc, ts) - table, database, typ, schema := descpb.TableFromDescriptor(desc, hlc.Timestamp{}), - desc.GetDatabase(), desc.GetType(), desc.GetSchema() - var unwrapped catalog.Descriptor - switch { - case table != nil: - immTable, err := tabledesc.NewFilledInImmutable(ctx, dg, table) - if err != nil { - return nil, err - } - unwrapped = immTable - case database != nil: - unwrapped = dbdesc.NewImmutable(*database) - case typ != nil: - unwrapped = typedesc.NewImmutable(*typ) - case schema != nil: - unwrapped = schemadesc.NewImmutable(*schema) - default: - return nil, nil - } - if validate { - var level catalog.ValidationLevel - if dg != nil { - level = catalog.ValidationLevelSelfAndCrossReferences - } - if err := catalog.Validate(ctx, dg, level, unwrapped).CombinedError(); err != nil { - return nil, err - } - } - return unwrapped, nil -} - -// unwrapDescriptorMutable takes a descriptor retrieved using a transaction and -// unwraps it into an implementation of catalog.MutableDescriptor. It ensures -// that the ModificationTime is set properly. 
-func unwrapDescriptorMutable( - ctx context.Context, dg catalog.DescGetter, ts hlc.Timestamp, desc *descpb.Descriptor, -) (catalog.MutableDescriptor, error) { - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, desc, ts) - table, database, typ, schema := - descpb.TableFromDescriptor(desc, hlc.Timestamp{}), - desc.GetDatabase(), desc.GetType(), desc.GetSchema() - var err error - var mut catalog.MutableDescriptor - switch { - case table != nil: - mut, err = tabledesc.NewFilledInExistingMutable(ctx, dg, false /* skipFKsWithMissingTable */, table) - case database != nil: - mut, err = dbdesc.NewExistingMutable(*database), nil - case typ != nil: - mut, err = typedesc.NewExistingMutable(*typ), nil - case schema != nil: - mut, err = schemadesc.NewMutableExisting(*schema), nil - } - if mut != nil && err == nil { - err = catalog.ValidateSelf(mut) - } - if err != nil { - return nil, err - } - return mut, nil -} - // CountUserDescriptors returns the number of descriptors present that were // created by the user (i.e. not present when the cluster started). func CountUserDescriptors(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec) (int, error) { @@ -390,27 +263,27 @@ func GetAllDescriptorsUnvalidated( if err != nil { return nil, err } - rawDescs := make([]descpb.Descriptor, len(kvs)) descs := make([]catalog.Descriptor, len(kvs)) dg := NewOneLevelUncachedDescGetter(txn, codec) - const validate = false for i, kv := range kvs { - desc := &rawDescs[i] - if err := kv.ValueProto(desc); err != nil { - return nil, err - } - var err error - if descs[i], err = unwrapDescriptor( - ctx, dg, kv.Value.Timestamp, desc, validate, - ); err != nil { + descs[i], err = descriptorFromKeyValue( + ctx, + codec, + kv, + immutable, + catalog.Any, + bestEffort, + dg, + catalog.NoValidation, + ) + if err != nil { return nil, err } } return descs, nil } -// GetAllDescriptors looks up and returns all available descriptors. If validate -// is set to true, it will also validate them. +// GetAllDescriptors looks up and returns all available descriptors. func GetAllDescriptors( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ) ([]catalog.Descriptor, error) { @@ -524,14 +397,34 @@ func GetDatabaseID( return dbID, nil } +// getDescriptorByID looks up the descriptor for `id` in the given `txn`. +func getDescriptorByID( + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + id descpb.ID, + mutable mutability, + expectedType catalog.DescriptorType, + required required, +) (desc catalog.Descriptor, err error) { + log.Eventf(ctx, "fetching descriptor with ID %d", id) + descKey := catalogkeys.MakeDescMetadataKey(codec, id) + r, err := txn.Get(ctx, descKey) + if err != nil { + return nil, err + } + dg := NewOneLevelUncachedDescGetter(txn, codec) + const level = catalog.ValidationLevelSelfAndCrossReferences + return descriptorFromKeyValue(ctx, codec, r, mutable, expectedType, required, dg, level) +} + // GetDatabaseDescByID looks up the database descriptor given its ID, // returning nil if the descriptor is not found. If you want the "not -// found" condition to return an error, use mustGetDatabaseDescByID() instead. +// found" condition to return an error, use MustGetDatabaseDescByID instead. 
func GetDatabaseDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, ) (*dbdesc.Immutable, error) { - desc, err := GetDescriptorByID(ctx, txn, codec, id, Immutable, - DatabaseDescriptorKind, false /* required */) + desc, err := getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Database, bestEffort) if err != nil || desc == nil { return nil, err } @@ -543,21 +436,43 @@ func GetDatabaseDescByID( func MustGetTableDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, ) (catalog.TableDescriptor, error) { - desc, err := GetDescriptorByID(ctx, txn, codec, id, Immutable, - TableDescriptorKind, true /* required */) - if err != nil || desc == nil { + desc, err := getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Table, mustGet) + if err != nil { return nil, err } return desc.(catalog.TableDescriptor), nil } +// MustGetMutableTableDescByID looks up the mutable table descriptor given its ID, +// returning an error if the table is not found. +func MustGetMutableTableDescByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (*tabledesc.Mutable, error) { + desc, err := getDescriptorByID(ctx, txn, codec, id, mutable, catalog.Table, mustGet) + if err != nil { + return nil, err + } + return desc.(*tabledesc.Mutable), nil +} + +// MustGetTypeDescByID looks up the type descriptor given its ID, +// returning an error if the type is not found. +func MustGetTypeDescByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (*typedesc.Immutable, error) { + desc, err := getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Type, mustGet) + if err != nil { + return nil, err + } + return desc.(*typedesc.Immutable), nil +} + // MustGetDatabaseDescByID looks up the database descriptor given its ID, // returning an error if the descriptor is not found. func MustGetDatabaseDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, ) (*dbdesc.Immutable, error) { - desc, err := GetDescriptorByID(ctx, txn, codec, id, Immutable, - DatabaseDescriptorKind, true /* required */) + desc, err := getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Database, mustGet) if err != nil { return nil, err } @@ -569,16 +484,53 @@ func MustGetDatabaseDescByID( func MustGetSchemaDescByID( ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, ) (*schemadesc.Immutable, error) { - desc, err := GetDescriptorByID(ctx, txn, codec, id, Immutable, - SchemaDescriptorKind, true /* required */) + desc, err := getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Schema, mustGet) if err != nil { return nil, err } - sc, ok := desc.(*schemadesc.Immutable) - if !ok { - return nil, errors.Newf("descriptor with id %d was not a schema", id) + return desc.(*schemadesc.Immutable), nil +} + +// GetDescriptorByID looks up the descriptor given its ID, +// returning nil if the descriptor is not found. If you want the "not +// found" condition to return an error, use MustGetDescriptorByID instead. +func GetDescriptorByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (catalog.Descriptor, error) { + return getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Any, bestEffort) +} + +// MustGetDescriptorByID looks up the descriptor given its ID, +// returning an error if the descriptor is not found. 
+func MustGetDescriptorByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (catalog.Descriptor, error) { + return getDescriptorByID(ctx, txn, codec, id, immutable, catalog.Any, mustGet) +} + +// GetMutableDescriptorByID looks up the mutable descriptor given its ID, +// returning nil if the descriptor is not found. If you want the "not found" +// condition to return an error, use MustGetMutableDescriptorByID instead. +func GetMutableDescriptorByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (catalog.MutableDescriptor, error) { + desc, err := getDescriptorByID(ctx, txn, codec, id, mutable, catalog.Any, bestEffort) + if err != nil || desc == nil { + return nil, err } - return sc, nil + return desc.(catalog.MutableDescriptor), err +} + +// MustGetMutableDescriptorByID looks up the mutable descriptor given its ID, +// returning an error if the descriptor is not found. +func MustGetMutableDescriptorByID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id descpb.ID, +) (catalog.MutableDescriptor, error) { + desc, err := getDescriptorByID(ctx, txn, codec, id, mutable, catalog.Any, mustGet) + if err != nil { + return nil, err + } + return desc.(catalog.MutableDescriptor), err } func getDescriptorsFromIDs( @@ -610,24 +562,23 @@ func getDescriptorsFromIDs( len(result.Rows), ) } - desc := &descpb.Descriptor{} - if err := result.Rows[0].ValueProto(desc); err != nil { + desc, err := descriptorFromKeyValue( + ctx, + codec, + result.Rows[0], + immutable, + catalog.Any, + bestEffort, + dg, + catalog.ValidationLevelSelfAndCrossReferences, + ) + if err != nil { return nil, err } - - var catalogDesc catalog.Descriptor - if desc.Union != nil { - var err error - catalogDesc, err = unwrapDescriptor(ctx, dg, result.Rows[0].Value.Timestamp, desc, true) - if err != nil { - return nil, err - } - } - - if catalogDesc == nil { + if desc == nil { return nil, wrapFn(ids[i], catalog.ErrDescriptorNotFound) } - results = append(results, catalogDesc) + results = append(results, desc) } return results, nil } @@ -685,30 +636,54 @@ func GetSchemaDescriptorsFromIDs( return res, nil } -// UnwrapDescriptorRaw takes a descriptor retrieved from a backup manifest or -// as input to the sql doctor and constructs the appropriate MutableDescriptor -// object implied by that object. It assumes and will panic if the -// ModificationTime for the descriptors are already set. -// -// TODO(ajwerner): This may prove problematic for backups of database -// descriptors without modification time. -// -// TODO(ajwerner): unify this with the other unwrapping logic. -func UnwrapDescriptorRaw(ctx context.Context, desc *descpb.Descriptor) catalog.MutableDescriptor { - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, desc, hlc.Timestamp{}) - table, database, typ, schema := descpb.TableFromDescriptor(desc, hlc.Timestamp{}), - desc.GetDatabase(), desc.GetType(), desc.GetSchema() - switch { - case table != nil: - return tabledesc.NewExistingMutable(*table) - case database != nil: - return dbdesc.NewExistingMutable(*database) - case typ != nil: - return typedesc.NewExistingMutable(*typ) - case schema != nil: - return schemadesc.NewMutableExisting(*schema) - default: - log.Fatalf(ctx, "failed to unwrap descriptor of type %T", desc.Union) - return nil // unreachable +// GetDescriptorCollidingWithObject looks up the object ID and returns the +// corresponding descriptor if it exists. 
+func GetDescriptorCollidingWithObject( + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + parentID descpb.ID, + parentSchemaID descpb.ID, + name string, +) (catalog.Descriptor, error) { + found, id, err := LookupObjectID(ctx, txn, codec, parentID, parentSchemaID, name) + if !found || err != nil { + return nil, err + } + // ID is already in use by another object. + desc, err := GetDescriptorByID(ctx, txn, codec, id) + if desc == nil && err == nil { + return nil, errors.NewAssertionErrorWithWrappedErrf( + catalog.ErrDescriptorNotFound, + "parentID=%d parentSchemaID=%d name=%q has ID=%d", + parentID, parentSchemaID, name, id) + } + if err != nil { + return nil, sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) + } + return desc, nil +} + +// CheckObjectCollision returns an error if an object already exists with the +// same parentID, parentSchemaID and name. +func CheckObjectCollision( + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + parentID descpb.ID, + parentSchemaID descpb.ID, + name tree.ObjectName, +) error { + desc, err := GetDescriptorCollidingWithObject(ctx, txn, codec, parentID, parentSchemaID, name.Object()) + if err != nil { + return err } + if desc != nil { + maybeQualifiedName := name.Object() + if name.Catalog() != "" && name.Schema() != "" { + maybeQualifiedName = name.FQString() + } + return sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), maybeQualifiedName) + } + return nil } diff --git a/pkg/sql/catalog/catalogkv/desc_getter.go b/pkg/sql/catalog/catalogkv/desc_getter.go new file mode 100644 index 000000000000..e0b8efe328b3 --- /dev/null +++ b/pkg/sql/catalog/catalogkv/desc_getter.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package catalogkv + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" +) + +// NewOneLevelUncachedDescGetter returns a new DescGetter backed by the passed +// Txn. It will use the transaction to resolve mutable descriptors using +// GetDescriptorByID but will pass a nil DescGetter into those lookup calls to +// ensure that the entire graph of dependencies is not traversed. +func NewOneLevelUncachedDescGetter(txn *kv.Txn, codec keys.SQLCodec) catalog.BatchDescGetter { + return &oneLevelUncachedDescGetter{ + txn: txn, + codec: codec, + } +} + +type oneLevelUncachedDescGetter struct { + codec keys.SQLCodec + txn *kv.Txn +} + +var _ catalog.DescGetter = (*oneLevelUncachedDescGetter)(nil) + +func (t *oneLevelUncachedDescGetter) fromKeyValue( + ctx context.Context, kv kv.KeyValue, +) (catalog.Descriptor, error) { + return descriptorFromKeyValue( + ctx, + t.codec, + kv, + immutable, + catalog.Any, + bestEffort, + // We pass a nil DescGetter for several reasons: + // 1. avoid infinite recursion (hence the "oneLevel" aspect), + // 2. avoid any unnecessary and irrelevant post-deserialization changes, + // 3. it's not used by validation at this level anyway. 
+ nil, /* dg */ + catalog.ValidationLevelSelfOnly, + ) +} + +func (t *oneLevelUncachedDescGetter) GetDesc( + ctx context.Context, id descpb.ID, +) (catalog.Descriptor, error) { + descKey := catalogkeys.MakeDescMetadataKey(t.codec, id) + kv, err := t.txn.Get(ctx, descKey) + if err != nil { + return nil, err + } + return t.fromKeyValue(ctx, kv) +} + +func (t *oneLevelUncachedDescGetter) GetDescs( + ctx context.Context, reqs []descpb.ID, +) ([]catalog.Descriptor, error) { + ba := t.txn.NewBatch() + for _, id := range reqs { + descKey := catalogkeys.MakeDescMetadataKey(t.codec, id) + ba.Get(descKey) + } + err := t.txn.Run(ctx, ba) + if err != nil { + return nil, err + } + ret := make([]catalog.Descriptor, len(reqs)) + for i, res := range ba.Results { + ret[i], err = t.fromKeyValue(ctx, res.Rows[0]) + if err != nil { + return nil, err + } + } + return ret, nil +} diff --git a/pkg/sql/catalog/catalogkv/descriptorkind_string.go b/pkg/sql/catalog/catalogkv/descriptorkind_string.go deleted file mode 100644 index 713cbfca1c76..000000000000 --- a/pkg/sql/catalog/catalogkv/descriptorkind_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type DescriptorKind catalogkv.go"; DO NOT EDIT. - -package catalogkv - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[DatabaseDescriptorKind-0] - _ = x[SchemaDescriptorKind-1] - _ = x[TableDescriptorKind-2] - _ = x[TypeDescriptorKind-3] - _ = x[AnyDescriptorKind-4] -} - -const _DescriptorKind_name = "DatabaseDescriptorKindSchemaDescriptorKindTableDescriptorKindTypeDescriptorKindAnyDescriptorKind" - -var _DescriptorKind_index = [...]uint8{0, 22, 42, 61, 79, 96} - -func (i DescriptorKind) String() string { - if i < 0 || i >= DescriptorKind(len(_DescriptorKind_index)-1) { - return "DescriptorKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _DescriptorKind_name[_DescriptorKind_index[i]:_DescriptorKind_index[i+1]] -} diff --git a/pkg/sql/catalog/catalogkv/test_utils.go b/pkg/sql/catalog/catalogkv/test_utils.go index 45b935de7a11..3e63a0f96327 100644 --- a/pkg/sql/catalog/catalogkv/test_utils.go +++ b/pkg/sql/catalog/catalogkv/test_utils.go @@ -59,8 +59,8 @@ func TestingGetImmutableTableDescriptor( func TestingGetMutableExistingTableDescriptor( kvDB *kv.DB, codec keys.SQLCodec, database string, table string, ) *tabledesc.Mutable { - return tabledesc.NewExistingMutable( - *TestingGetImmutableTableDescriptor(kvDB, codec, database, table).TableDesc()) + return tabledesc.NewBuilder( + TestingGetImmutableTableDescriptor(kvDB, codec, database, table).TableDesc()).BuildExistingMutableTable() } // TestingGetTypeDescriptorFromSchema retrieves a type descriptor directly from @@ -152,8 +152,8 @@ func testingGetObjectDescriptor( } else if !found { panic(fmt.Sprintf("object %s not found", object)) } - desc, err = GetDescriptorByID( - ctx, txn, codec, objectID, Immutable, AnyDescriptorKind, true /* required */) + desc, err = getDescriptorByID( + ctx, txn, codec, objectID, immutable, catalog.Any, true /* required */) if err != nil { panic(err) } diff --git a/pkg/sql/catalog/catalogkv/unwrap_validation_test.go b/pkg/sql/catalog/catalogkv/unwrap_validation_test.go index 1cde4832356b..331bb6a66aa7 100644 --- a/pkg/sql/catalog/catalogkv/unwrap_validation_test.go +++ b/pkg/sql/catalog/catalogkv/unwrap_validation_test.go @@ -19,6 +19,9 @@ import ( "strconv" "testing" + 
"github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -55,13 +58,12 @@ func TestUnwrapValidation(t *testing.T) { func unwrapValidationTest(t *testing.T, descriptorCSVPath string) { m := decodeDescriptorDSV(t, descriptorCSVPath) for id, data := range m { - var desc descpb.Descriptor - require.NoError(t, protoutil.Unmarshal(data, &desc)) - ts := descpb.GetDescriptorModificationTime(&desc) - if ts == (hlc.Timestamp{}) { - ts = hlc.Timestamp{WallTime: 1} - } - _, err := unwrapDescriptor(context.Background(), m, ts, &desc, true) + var descProto descpb.Descriptor + require.NoError(t, protoutil.Unmarshal(data, &descProto)) + desc, err := m.GetDesc(context.Background(), id) + require.NoErrorf(t, err, "id: %d", id) + require.NotNilf(t, desc, "id: %d", id) + err = catalog.ValidateSelfAndCrossReferences(context.Background(), m, desc) require.NoErrorf(t, err, "id: %d", id) } } @@ -84,7 +86,20 @@ func (o oneLevelMapDescGetter) GetDesc( if mt == (hlc.Timestamp{}) { mt = hlc.Timestamp{WallTime: 1} } - return unwrapDescriptorMutable(ctx, nil, mt, &desc) + v := roachpb.Value{Timestamp: mt} + if err := v.SetProto(&desc); err != nil { + return nil, err + } + return descriptorFromKeyValue( + ctx, + keys.SQLCodec{}, // dummy value, not used due to passing Any and bestEffort. + kv.KeyValue{Value: &v}, + immutable, + catalog.Any, + bestEffort, + nil, /* dg */ // See oneLevelUncachedDescGetter.fromKeyValuePair(). + catalog.ValidationLevelSelfOnly, + ) } func decodeDescriptorDSV(t *testing.T, descriptorCSVPath string) oneLevelMapDescGetter { diff --git a/pkg/sql/catalog/catformat/testutils_test.go b/pkg/sql/catalog/catformat/testutils_test.go index 028d42436ceb..3b4fa9ec3371 100644 --- a/pkg/sql/catalog/catformat/testutils_test.go +++ b/pkg/sql/catalog/catformat/testutils_test.go @@ -52,10 +52,10 @@ func testTableDesc( Direction: descpb.DescriptorMutation_ADD, } } - return tabledesc.NewImmutable(descpb.TableDescriptor{ + return tabledesc.NewBuilder(&descpb.TableDescriptor{ Name: name, ID: 1, Columns: cols, Mutations: muts, - }) + }).BuildImmutableTable() } diff --git a/pkg/sql/catalog/dbdesc/BUILD.bazel b/pkg/sql/catalog/dbdesc/BUILD.bazel index 4e0524459611..310e77493e33 100644 --- a/pkg/sql/catalog/dbdesc/BUILD.bazel +++ b/pkg/sql/catalog/dbdesc/BUILD.bazel @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "dbdesc", - srcs = ["database_desc.go"], + srcs = [ + "database_desc.go", + "database_desc_builder.go", + ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc", visibility = ["//visibility:public"], deps = [ diff --git a/pkg/sql/catalog/dbdesc/database_desc.go b/pkg/sql/catalog/dbdesc/database_desc.go index 1d84f14862c8..bb25d3cbd827 100644 --- a/pkg/sql/catalog/dbdesc/database_desc.go +++ b/pkg/sql/catalog/dbdesc/database_desc.go @@ -16,13 +16,11 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/iterutil" - "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" 
"github.com/cockroachdb/redact" ) @@ -49,76 +47,6 @@ type Mutable struct { ClusterVersion *Immutable } -// NewInitialOption is an optional argument for NewInitial. -type NewInitialOption func(*descpb.DatabaseDescriptor) - -// NewInitialOptionDatabaseRegionConfig is an option allowing an optional -// regional configuration to be set on the database descriptor. -func NewInitialOptionDatabaseRegionConfig( - regionConfig *descpb.DatabaseDescriptor_RegionConfig, -) NewInitialOption { - return func(desc *descpb.DatabaseDescriptor) { - desc.RegionConfig = regionConfig - } -} - -// NewInitial constructs a new Mutable for an initial version from an id and -// name with default privileges. -func NewInitial( - id descpb.ID, name string, owner security.SQLUsername, options ...NewInitialOption, -) *Mutable { - return NewInitialWithPrivileges( - id, - name, - descpb.NewDefaultPrivilegeDescriptor(owner), - options..., - ) -} - -// NewInitialWithPrivileges constructs a new Mutable for an initial version -// from an id and name and custom privileges. -func NewInitialWithPrivileges( - id descpb.ID, name string, privileges *descpb.PrivilegeDescriptor, options ...NewInitialOption, -) *Mutable { - ret := descpb.DatabaseDescriptor{ - Name: name, - ID: id, - Version: 1, - Privileges: privileges, - } - for _, option := range options { - option(&ret) - } - return NewCreatedMutable(ret) -} - -func makeImmutable(desc descpb.DatabaseDescriptor) Immutable { - return Immutable{DatabaseDescriptor: desc} -} - -// NewImmutable makes a new immutable database descriptor. -func NewImmutable(desc descpb.DatabaseDescriptor) *Immutable { - ret := makeImmutable(desc) - return &ret -} - -// NewCreatedMutable returns a Mutable from the given database descriptor with -// a nil cluster version. This is for a database that is created in the same -// transaction. -func NewCreatedMutable(desc descpb.DatabaseDescriptor) *Mutable { - return &Mutable{Immutable: makeImmutable(desc)} -} - -// NewExistingMutable returns a Mutable from the given database descriptor with -// the cluster version also set to the descriptor. This is for databases that -// already exist. -func NewExistingMutable(desc descpb.DatabaseDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(*protoutil.Clone(&desc).(*descpb.DatabaseDescriptor)), - ClusterVersion: NewImmutable(desc), - } -} - // SafeMessage makes Immutable a SafeMessager. func (desc *Immutable) SafeMessage() string { return formatSafeMessage("dbdesc.Immutable", desc) @@ -137,9 +65,9 @@ func formatSafeMessage(typeName string, desc catalog.DatabaseDescriptor) string return buf.String() } -// TypeName returns the plain type of this descriptor. -func (desc *Immutable) TypeName() string { - return "database" +// DescriptorType returns the plain type of this descriptor. +func (desc *Immutable) DescriptorType() catalog.DescriptorType { + return catalog.Database } // DatabaseDesc implements the Descriptor interface. @@ -280,11 +208,6 @@ func (desc *Immutable) ValidateSelf(vea catalog.ValidationErrorAccumulator) { vea.Report(fmt.Errorf("invalid database ID %d", desc.GetID())) } - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) - // Validate the privilege descriptor. 
vea.Report(desc.Privileges.Validate(desc.GetID(), privilege.Database)) @@ -437,9 +360,7 @@ func (desc *Mutable) OriginalVersion() descpb.DescriptorVersion { // ImmutableCopy implements the MutableDescriptor interface. func (desc *Mutable) ImmutableCopy() catalog.Descriptor { - // TODO (lucy): Should the immutable descriptor constructors always make a - // copy, so we don't have to do it here? - imm := NewImmutable(*protoutil.Clone(desc.DatabaseDesc()).(*descpb.DatabaseDescriptor)) + imm := NewBuilder(desc.DatabaseDesc()).BuildImmutableDatabase() imm.isUncommittedVersion = desc.IsUncommittedVersion() return imm } diff --git a/pkg/sql/catalog/dbdesc/database_desc_builder.go b/pkg/sql/catalog/dbdesc/database_desc_builder.go new file mode 100644 index 000000000000..836a86f8f6a2 --- /dev/null +++ b/pkg/sql/catalog/dbdesc/database_desc_builder.go @@ -0,0 +1,151 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package dbdesc + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" +) + +// DatabaseDescriptorBuilder is an extension of catalog.DescriptorBuilder +// for database descriptors. +type DatabaseDescriptorBuilder interface { + catalog.DescriptorBuilder + BuildImmutableDatabase() *Immutable + BuildExistingMutableDatabase() *Mutable + BuildCreatedMutableDatabase() *Mutable +} + +type databaseDescriptorBuilder struct { + original *descpb.DatabaseDescriptor + maybeModified *descpb.DatabaseDescriptor +} + +var _ DatabaseDescriptorBuilder = &databaseDescriptorBuilder{} + +// NewBuilder creates a new catalog.DescriptorBuilder object for building +// database descriptors. +func NewBuilder(desc *descpb.DatabaseDescriptor) DatabaseDescriptorBuilder { + return &databaseDescriptorBuilder{ + original: protoutil.Clone(desc).(*descpb.DatabaseDescriptor), + } +} + +// DescriptorType implements the catalog.DescriptorBuilder interface. +func (ddb *databaseDescriptorBuilder) DescriptorType() catalog.DescriptorType { + return catalog.Database +} + +// RunPostDeserializationChanges implements the catalog.DescriptorBuilder +// interface. +func (ddb *databaseDescriptorBuilder) RunPostDeserializationChanges( + _ context.Context, _ catalog.DescGetter, +) error { + // Fill in any incorrect privileges that may have been missed due to mixed-versions. + // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. + ddb.maybeModified = protoutil.Clone(ddb.original).(*descpb.DatabaseDescriptor) + descpb.MaybeFixPrivileges(ddb.maybeModified.ID, &ddb.maybeModified.Privileges) + return nil +} + +// BuildImmutable implements the catalog.DescriptorBuilder interface. +func (ddb *databaseDescriptorBuilder) BuildImmutable() catalog.Descriptor { + return ddb.BuildImmutableDatabase() +} + +// BuildImmutableDatabase returns an immutable database descriptor. 
+func (ddb *databaseDescriptorBuilder) BuildImmutableDatabase() *Immutable { + desc := ddb.maybeModified + if desc == nil { + desc = ddb.original + } + return &Immutable{DatabaseDescriptor: *desc} +} + +// BuildExistingMutable implements the catalog.DescriptorBuilder interface. +func (ddb *databaseDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor { + return ddb.BuildExistingMutableDatabase() +} + +// BuildExistingMutableDatabase returns a mutable descriptor for a database +// which already exists. +func (ddb *databaseDescriptorBuilder) BuildExistingMutableDatabase() *Mutable { + if ddb.maybeModified == nil { + ddb.maybeModified = protoutil.Clone(ddb.original).(*descpb.DatabaseDescriptor) + } + return &Mutable{ + Immutable: Immutable{DatabaseDescriptor: *ddb.maybeModified}, + ClusterVersion: &Immutable{DatabaseDescriptor: *ddb.original}, + } +} + +// BuildCreatedMutable implements the catalog.DescriptorBuilder interface. +func (ddb *databaseDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor { + return ddb.BuildCreatedMutableDatabase() +} + +// BuildCreatedMutableDatabase returns a mutable descriptor for a database +// which is in the process of being created. +func (ddb *databaseDescriptorBuilder) BuildCreatedMutableDatabase() *Mutable { + desc := ddb.maybeModified + if desc == nil { + desc = ddb.original + } + return &Mutable{Immutable: Immutable{DatabaseDescriptor: *desc}} +} + +// NewInitialOption is an optional argument for NewInitial. +type NewInitialOption func(*descpb.DatabaseDescriptor) + +// NewInitialOptionDatabaseRegionConfig is an option allowing an optional +// regional configuration to be set on the database descriptor. +func NewInitialOptionDatabaseRegionConfig( + regionConfig *descpb.DatabaseDescriptor_RegionConfig, +) NewInitialOption { + return func(desc *descpb.DatabaseDescriptor) { + desc.RegionConfig = regionConfig + } +} + +// NewInitial constructs a new Mutable for an initial version from an id and +// name with default privileges. +func NewInitial( + id descpb.ID, name string, owner security.SQLUsername, options ...NewInitialOption, +) *Mutable { + return NewInitialWithPrivileges( + id, + name, + descpb.NewDefaultPrivilegeDescriptor(owner), + options..., + ) +} + +// NewInitialWithPrivileges constructs a new Mutable for an initial version +// from an id and name and custom privileges. 
+func NewInitialWithPrivileges( + id descpb.ID, name string, privileges *descpb.PrivilegeDescriptor, options ...NewInitialOption, +) *Mutable { + ret := descpb.DatabaseDescriptor{ + Name: name, + ID: id, + Version: 1, + Privileges: privileges, + } + for _, option := range options { + option(&ret) + } + return NewBuilder(&ret).BuildCreatedMutableDatabase() +} diff --git a/pkg/sql/catalog/dbdesc/database_test.go b/pkg/sql/catalog/dbdesc/database_test.go index 0735b79bf616..583e6634d89b 100644 --- a/pkg/sql/catalog/dbdesc/database_test.go +++ b/pkg/sql/catalog/dbdesc/database_test.go @@ -34,21 +34,21 @@ func TestSafeMessage(t *testing.T) { exp string }{ { - desc: NewImmutable(descpb.DatabaseDescriptor{ + desc: NewBuilder(&descpb.DatabaseDescriptor{ ID: 12, Version: 1, State: descpb.DescriptorState_OFFLINE, OfflineReason: "foo", - }), + }).BuildImmutableDatabase(), exp: "dbdesc.Immutable: {ID: 12, Version: 1, ModificationTime: \"0,0\", State: OFFLINE, OfflineReason: \"foo\"}", }, { - desc: NewCreatedMutable(descpb.DatabaseDescriptor{ + desc: NewBuilder(&descpb.DatabaseDescriptor{ ID: 42, Version: 2, State: descpb.DescriptorState_OFFLINE, OfflineReason: "bar", - }), + }).BuildCreatedMutableDatabase(), exp: "dbdesc.Mutable: {ID: 42, Version: 2, IsUncommitted: true, ModificationTime: \"0,0\", State: OFFLINE, OfflineReason: \"bar\"}", }, } { @@ -89,18 +89,18 @@ func TestValidateDatabaseDesc(t *testing.T) { testData := []struct { err string - desc *Immutable + desc descpb.DatabaseDescriptor }{ {`invalid database ID 0`, - NewImmutable(descpb.DatabaseDescriptor{ + descpb.DatabaseDescriptor{ Name: "db", ID: 0, Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), - }), + }, }, { `region "us-east-1" seen twice on db 200`, - NewImmutable(descpb.DatabaseDescriptor{ + descpb.DatabaseDescriptor{ Name: "multi-region-db", ID: 200, RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ @@ -111,11 +111,11 @@ func TestValidateDatabaseDesc(t *testing.T) { PrimaryRegion: "us-east-1", }, Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), - }), + }, }, { `primary region unset on a multi-region db 200`, - NewImmutable(descpb.DatabaseDescriptor{ + descpb.DatabaseDescriptor{ Name: "multi-region-db", ID: 200, RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ @@ -124,11 +124,11 @@ func TestValidateDatabaseDesc(t *testing.T) { }, }, Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), - }), + }, }, { `primary region not found in list of regions on db 200`, - NewImmutable(descpb.DatabaseDescriptor{ + descpb.DatabaseDescriptor{ Name: "multi-region-db", ID: 200, RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ @@ -138,13 +138,14 @@ func TestValidateDatabaseDesc(t *testing.T) { PrimaryRegion: "us-east-2", }, Privileges: descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()), - }), + }, }, } for i, d := range testData { t.Run(d.err, func(t *testing.T) { - expectedErr := fmt.Sprintf("%s %q (%d): %s", d.desc.TypeName(), d.desc.GetName(), d.desc.GetID(), d.err) - if err := catalog.ValidateSelf(d.desc); err == nil { + desc := NewBuilder(&d.desc).BuildImmutable() + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), d.err) + if err := catalog.ValidateSelf(desc); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, d.desc) } else if expectedErr != err.Error() { t.Errorf("%d: expected \"%s\", but found \"%+v\"", i, expectedErr, err) @@ -299,15 +300,15 @@ func 
TestValidateCrossDatabaseReferences(t *testing.T) { privilege := descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) descs := catalog.MapDescGetter{} test.desc.Privileges = privilege - desc := NewImmutable(test.desc) + desc := NewBuilder(&test.desc).BuildImmutable() descs[test.desc.ID] = desc test.multiRegionEnum.Privileges = privilege - descs[test.multiRegionEnum.ID] = typedesc.NewImmutable(test.multiRegionEnum) + descs[test.multiRegionEnum.ID] = typedesc.NewBuilder(&test.multiRegionEnum).BuildImmutable() for _, schemaDesc := range test.schemaDescs { schemaDesc.Privileges = privilege - descs[schemaDesc.ID] = schemadesc.NewImmutable(schemaDesc) + descs[schemaDesc.ID] = schemadesc.NewBuilder(&schemaDesc).BuildImmutable() } - expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { if test.err != "" { diff --git a/pkg/sql/catalog/descpb/descriptor.go b/pkg/sql/catalog/descpb/descriptor.go index a8d3d34dce49..f2966f79d7ba 100644 --- a/pkg/sql/catalog/descpb/descriptor.go +++ b/pkg/sql/catalog/descpb/descriptor.go @@ -144,9 +144,7 @@ func setDescriptorModificationTime(desc *Descriptor, ts hlc.Timestamp) { // // It is vital that users which read table descriptor values from the KV store // call this method. -func MaybeSetDescriptorModificationTimeFromMVCCTimestamp( - ctx context.Context, desc *Descriptor, ts hlc.Timestamp, -) { +func MaybeSetDescriptorModificationTimeFromMVCCTimestamp(desc *Descriptor, ts hlc.Timestamp) { switch t := desc.Union.(type) { case nil: // Empty descriptors shouldn't be touched. @@ -198,30 +196,41 @@ func MaybeSetDescriptorModificationTimeFromMVCCTimestamp( } } -// TableFromDescriptor is a replacement for GetTable() which seeks to ensure -// that clients which unmarshal Descriptor structs properly set the -// ModificationTime on tables based on the MVCC timestamp at which the -// descriptor was read. -// -// A linter should ensure that GetTable() is not called. +// FromDescriptorWithMVCCTimestamp is a replacement for +// Get(Table|Database|Type|Schema)() methods which seeks to ensure that clients +// which unmarshal Descriptor structs properly set the ModificationTime based on +// the MVCC timestamp at which the descriptor was read. // -// TODO(ajwerner): Now that all descriptors have their modification time set -// this way, this function should be retired and similar or better safeguards -// for all descriptors should be pursued. -func TableFromDescriptor(desc *Descriptor, ts hlc.Timestamp) *TableDescriptor { - //nolint:descriptormarshal - t := desc.GetTable() - if t != nil { - MaybeSetDescriptorModificationTimeFromMVCCTimestamp(context.TODO(), desc, ts) +// A linter check ensures that GetTable() et al. are not called elsewhere unless +// absolutely necessary. 
+func FromDescriptorWithMVCCTimestamp( + desc *Descriptor, ts hlc.Timestamp, +) ( + table *TableDescriptor, + database *DatabaseDescriptor, + typ *TypeDescriptor, + schema *SchemaDescriptor, +) { + if desc == nil { + return nil, nil, nil, nil } - return t + //nolint:descriptormarshal + table = desc.GetTable() + //nolint:descriptormarshal + database = desc.GetDatabase() + //nolint:descriptormarshal + typ = desc.GetType() + //nolint:descriptormarshal + schema = desc.GetSchema() + MaybeSetDescriptorModificationTimeFromMVCCTimestamp(desc, ts) + return table, database, typ, schema } -// TypeFromDescriptor is the same thing as TableFromDescriptor, but for types. -func TypeFromDescriptor(desc *Descriptor, ts hlc.Timestamp) *TypeDescriptor { - t := desc.GetType() - if t != nil { - MaybeSetDescriptorModificationTimeFromMVCCTimestamp(context.TODO(), desc, ts) - } - return t +// FromDescriptor is a convenience function for FromDescriptorWithMVCCTimestamp +// called with an empty timestamp. As a result this does not modify the +// descriptor. +func FromDescriptor( + desc *Descriptor, +) (*TableDescriptor, *DatabaseDescriptor, *TypeDescriptor, *SchemaDescriptor) { + return FromDescriptorWithMVCCTimestamp(desc, hlc.Timestamp{}) } diff --git a/pkg/sql/catalog/descpb/privilege.go b/pkg/sql/catalog/descpb/privilege.go index 887dba317bae..5daad847d396 100644 --- a/pkg/sql/catalog/descpb/privilege.go +++ b/pkg/sql/catalog/descpb/privilege.go @@ -266,36 +266,40 @@ func MaybeFixPrivileges(id ID, ptr **PrivilegeDescriptor) bool { return modified } -// Validate is called when writing a database, table or type descriptor. -// It takes the descriptor ID which is used to determine if -// it belongs to a system descriptor, in which case the maximum -// set of allowed privileges is looked up and applied. +// Validate returns an error if the privilege descriptor is invalid. +// It requires the ID of the descriptor it is applied on to determine whether +// it is a system descriptor, because superusers do not always have full +// privileges for those. +// It requires the objectType to determine the superset of privileges allowed +// for regular users. func (p PrivilegeDescriptor) Validate(id ID, objectType privilege.ObjectType) error { - allowedPrivileges := DefaultSuperuserPrivileges + allowedSuperuserPrivileges := DefaultSuperuserPrivileges + maybeSystem := "" if IsReservedID(id) { var ok bool - allowedPrivileges, ok = SystemAllowedPrivileges[id] + maybeSystem = "system " + allowedSuperuserPrivileges, ok = SystemAllowedPrivileges[id] if !ok { - return fmt.Errorf("no allowed privileges found for system %s with ID=%d", - objectType, id) + return fmt.Errorf("no allowed privileges defined for %s%s with ID=%d", + maybeSystem, objectType, id) } } // Check "root" user. - if err := p.validateRequiredSuperuser(id, allowedPrivileges, security.RootUserName(), objectType); err != nil { + if err := p.validateRequiredSuperuser(id, allowedSuperuserPrivileges, security.RootUserName(), objectType); err != nil { return err } // We expect an "admin" role. Check that it has desired superuser permissions.
- if err := p.validateRequiredSuperuser(id, allowedPrivileges, security.AdminRoleName(), objectType); err != nil { + if err := p.validateRequiredSuperuser(id, allowedSuperuserPrivileges, security.AdminRoleName(), objectType); err != nil { return err } if p.Version >= OwnerVersion { if p.Owner().Undefined() { - return errors.AssertionFailedf("found no owner for system %s with ID=%d", - objectType, id) + return errors.AssertionFailedf("found no owner for %s%s with ID=%d", + maybeSystem, objectType, id) } } @@ -310,8 +314,8 @@ func (p PrivilegeDescriptor) Validate(id ID, objectType privilege.ObjectType) er } if remaining := u.Privileges &^ allowedPrivilegesBits; remaining != 0 { - return fmt.Errorf("user %s must not have %s privileges on system %s with ID=%d", - u.User(), privilege.ListFromBitField(remaining, privilege.Any), objectType, id) + return fmt.Errorf("user %s must not have %s privileges on %s%s with ID=%d", + u.User(), privilege.ListFromBitField(remaining, privilege.Any), maybeSystem, objectType, id) } // Get all the privilege bits set on the descriptor even if they're not valid. privs := privilege.ListFromBitField(u.Privileges, privilege.Any) @@ -331,16 +335,20 @@ func (p PrivilegeDescriptor) validateRequiredSuperuser( user security.SQLUsername, objectType privilege.ObjectType, ) error { + maybeSystem := "" + if IsReservedID(id) { + maybeSystem = "system " + } superPriv, ok := p.findUser(user) if !ok { - return fmt.Errorf("user %s does not have privileges over system %s with ID=%d", - user, objectType, id) + return fmt.Errorf("user %s does not have privileges over %s%s with ID=%d", + user, maybeSystem, objectType, id) } // The super users must match the allowed privilege set exactly. if superPriv.Privileges != allowedPrivileges.ToBitField() { - return fmt.Errorf("user %s must have exactly %s privileges on system %s with ID=%d", - user, allowedPrivileges, objectType, id) + return fmt.Errorf("user %s must have exactly %s privileges on %s%s with ID=%d", + user, allowedPrivileges, maybeSystem, objectType, id) } return nil diff --git a/pkg/sql/catalog/descpb/privilege_test.go b/pkg/sql/catalog/descpb/privilege_test.go index 8d434f27c7dd..e5192c7d3f48 100644 --- a/pkg/sql/catalog/descpb/privilege_test.go +++ b/pkg/sql/catalog/descpb/privilege_test.go @@ -350,9 +350,9 @@ func TestSystemPrivilegeValidate(t *testing.T) { defer delete(SystemAllowedPrivileges, id) rootWrongPrivilegesErr := "user root must have exactly SELECT, GRANT " + - "privileges on system table with ID=.*" + "privileges on (system )?table with ID=.*" adminWrongPrivilegesErr := "user admin must have exactly SELECT, GRANT " + - "privileges on system table with ID=.*" + "privileges on (system )?table with ID=.*" { // Valid: root user has one of the allowable privilege sets. diff --git a/pkg/sql/catalog/descriptor.go b/pkg/sql/catalog/descriptor.go index a84b8dfb0b65..adf07ae28a2a 100644 --- a/pkg/sql/catalog/descriptor.go +++ b/pkg/sql/catalog/descriptor.go @@ -27,6 +27,55 @@ import ( "github.com/cockroachdb/redact" ) +// DescriptorType is a symbol representing the (sub)type of a descriptor. +type DescriptorType string + +const ( + // Any represents any descriptor. + Any DescriptorType = "any" + + // Database is for database descriptors. + Database = "database" + + // Table is for table descriptors. + Table = "relation" + + // Type is for type descriptors. + Type = "type" + + // Schema is for schema descriptors. + Schema = "schema" +) + +// DescriptorBuilder interfaces are used to build catalog.Descriptor +// objects. 
+type DescriptorBuilder interface { + + // DescriptorType returns a symbol identifying the type of the descriptor + // built by this builder. + DescriptorType() DescriptorType + + // RunPostDeserializationChanges attempts to perform post-deserialization + // changes to the descriptor being built. + // These changes are always done on a best-effort basis, meaning that all + // arguments other than ctx are optional. As of writing this, the only other + // argument is a DescGetter and a nil value will cause table foreign-key + // representation upgrades to be skipped. + RunPostDeserializationChanges(ctx context.Context, dg DescGetter) error + + // BuildImmutable returns an immutable Descriptor. + BuildImmutable() Descriptor + + // BuildExistingMutable returns a MutableDescriptor with the cluster version + // set to the original value of the descriptor used to initialize the builder. + // This is for descriptors that already exist. + BuildExistingMutable() MutableDescriptor + + // BuildCreatedMutable returns a MutableDescriptor with a nil cluster version. + // This is for a descriptor that is created in the same transaction. + BuildCreatedMutable() MutableDescriptor +} + // IndexOpts configures the behavior of catalog.ForEachIndex and // catalog.FindIndex. type IndexOpts struct { @@ -61,7 +110,7 @@ type Descriptor interface { GetDrainingNames() []descpb.NameInfo GetPrivileges() *descpb.PrivilegeDescriptor - TypeName() string + DescriptorType() DescriptorType GetAuditMode() descpb.TableDescriptor_AuditMode Public() bool @@ -525,9 +574,9 @@ func FilterDescriptorState(desc Descriptor, flags tree.CommonLookupFlags) error case desc.Dropped() && !flags.IncludeDropped: return NewInactiveDescriptorError(ErrDescriptorDropped) case desc.Offline() && !flags.IncludeOffline: - err := errors.Errorf("%s %q is offline", desc.TypeName(), desc.GetName()) + err := errors.Errorf("%s %q is offline", desc.DescriptorType(), desc.GetName()) if desc.GetOfflineReason() != "" { - err = errors.Errorf("%s %q is offline: %s", desc.TypeName(), desc.GetName(), desc.GetOfflineReason()) + err = errors.Errorf("%s %q is offline: %s", desc.DescriptorType(), desc.GetName(), desc.GetOfflineReason()) } return NewInactiveDescriptorError(err) case desc.Adding(): diff --git a/pkg/sql/catalog/descriptor_test.go b/pkg/sql/catalog/descriptor_test.go index 0c9a0f896a4e..ba40d63714df 100644 --- a/pkg/sql/catalog/descriptor_test.go +++ b/pkg/sql/catalog/descriptor_test.go @@ -29,41 +29,41 @@ func TestFormatSafeDescriptorProperties(t *testing.T) { exp string }{ { - desc: tabledesc.NewImmutable(descpb.TableDescriptor{ + desc: tabledesc.NewBuilder(&descpb.TableDescriptor{ ID: 27, Version: 2, ParentID: 12, State: descpb.DescriptorState_ADD, - }), + }).BuildImmutable(), exp: "ID: 27, Version: 2, ModificationTime: \"0,0\", ParentID: 12, ParentSchemaID: 29, State: ADD", }, { - desc: schemadesc.NewImmutable(descpb.SchemaDescriptor{ + desc: schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ID: 12, Version: 1, ParentID: 2, State: descpb.DescriptorState_OFFLINE, OfflineReason: "foo", - }), + }).BuildImmutable(), exp: "ID: 12, Version: 1, ModificationTime: \"0,0\", ParentID: 2, State: OFFLINE, OfflineReason: \"foo\"", }, { - desc: dbdesc.NewCreatedMutable(descpb.DatabaseDescriptor{ + desc: dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ ID: 12, Version: 1, State: descpb.DescriptorState_PUBLIC, - }), + }).BuildCreatedMutable(), exp: "ID: 12, Version: 1, IsUncommitted: true, ModificationTime: \"0,0\", State: PUBLIC", }, { desc: func() catalog.Descriptor { - 
desc := tabledesc.NewExistingMutable(descpb.TableDescriptor{ + desc := tabledesc.NewBuilder(&descpb.TableDescriptor{ ID: 27, Version: 2, ParentID: 12, UnexposedParentSchemaID: 51, State: descpb.DescriptorState_PUBLIC, - }) + }).BuildExistingMutableTable() desc.MaybeIncrementVersion() desc.AddDrainingName(descpb.NameInfo{ ParentID: 12, diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index 8da19c4bafd1..b0f34f4583b9 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -323,7 +323,7 @@ func (tc *Collection) getDescriptorFromStore( } } // Always pick up a mutable copy so it can be cached. - desc, err = catalogkv.GetAnyDescriptorByID(ctx, txn, codec, descID, catalogkv.Mutable) + desc, err = catalogkv.GetMutableDescriptorByID(ctx, txn, codec, descID) if err != nil { return false, nil, err } else if desc == nil && isSystemDescriptor { @@ -388,8 +388,7 @@ func (tc *Collection) getDatabaseByName( // that callers of this method will check the privileges on the descriptor // (like any other database) and return an error. if mutable { - return true, dbdesc.NewExistingMutable( - *systemschema.MakeSystemDatabaseDesc().DatabaseDesc()), nil + return true, dbdesc.NewBuilder(systemschema.MakeSystemDatabaseDesc().DatabaseDesc()).BuildExistingMutableDatabase(), nil } return true, systemschema.MakeSystemDatabaseDesc(), nil } @@ -997,17 +996,16 @@ func (tc *Collection) getDescriptorByIDMaybeSetTxnDeadline( // Always pick up a mutable copy so it can be cached. // TODO (lucy): If the descriptor doesn't exist, should we generate our // own error here instead of using the one from catalogkv? - desc, err := catalogkv.GetDescriptorByID(ctx, txn, tc.codec(), id, - catalogkv.Mutable, catalogkv.AnyDescriptorKind, true /* required */) + desc, err := catalogkv.MustGetMutableDescriptorByID(ctx, txn, tc.codec(), id) if err != nil { return nil, err } - ud, err := tc.addUncommittedDescriptor(desc.(catalog.MutableDescriptor)) + ud, err := tc.addUncommittedDescriptor(desc) if err != nil { return nil, err } if !mutable { - desc = ud.immutable + return ud.immutable, nil } return desc, nil } @@ -1258,7 +1256,10 @@ func (tc *Collection) hydrateTypesInTableDesc( if err := typedesc.HydrateTypesInTableDescriptor(ctx, descBase, getType); err != nil { return nil, err } - return tabledesc.NewImmutableWithIsUncommittedVersion(*descBase, t.IsUncommittedVersion()), nil + if t.IsUncommittedVersion() { + return tabledesc.NewBuilderForUncommittedVersion(descBase).BuildImmutableTable(), nil + } + return tabledesc.NewBuilder(descBase).BuildImmutableTable(), nil default: return desc, nil } diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index 7b437a315e3f..c148547cb91c 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -66,7 +66,7 @@ func TestCollectionWriteDescToBatch(t *testing.T) { require.NotNil(t, mut) // We want to create some descriptors and then ensure that writing them to a // batch works as expected. - newTable := tabledesc.NewCreatedMutable(descpb.TableDescriptor{ + newTable := tabledesc.NewBuilder(&descpb.TableDescriptor{ ID: 142, Name: "table2", Version: 1, @@ -97,7 +97,7 @@ func TestCollectionWriteDescToBatch(t *testing.T) { NextIndexID: 2, NextMutationID: 1, FormatVersion: descpb.FamilyFormatVersion, - }) + }).BuildCreatedMutableTable() b := txn.NewBatch() // Ensure that there are no errors and that the version is incremented. 
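Editor's note: the call-site churn above is mechanical — each removed direct constructor (NewImmutable, NewExistingMutable, NewCreatedMutable) is replaced by NewBuilder chained with the matching Build* method. A minimal sketch of the new construction pattern follows; the package name, function name, and descriptor field values are illustrative only, while the builder calls themselves are taken from this change.

package example // illustrative only; not part of the change

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
)

// buildDescriptors shows the builder calls that replace the removed
// NewImmutable / NewExistingMutable / NewCreatedMutable constructors.
func buildDescriptors(proto descpb.TableDescriptor) {
	// Immutable view of the proto (replaces tabledesc.NewImmutable).
	imm := tabledesc.NewBuilder(&proto).BuildImmutableTable()

	// Mutable with the cluster version set to the original proto
	// (replaces tabledesc.NewExistingMutable).
	existing := tabledesc.NewBuilder(&proto).BuildExistingMutableTable()

	// Mutable with a nil cluster version, for descriptors created in the
	// current transaction (replaces tabledesc.NewCreatedMutable).
	created := tabledesc.NewBuilder(&proto).BuildCreatedMutableTable()

	_, _, _ = imm, existing, created
}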
diff --git a/pkg/sql/catalog/hydratedtables/hydratedcache.go b/pkg/sql/catalog/hydratedtables/hydratedcache.go index 036d7928d43d..7e6288a71212 100644 --- a/pkg/sql/catalog/hydratedtables/hydratedcache.go +++ b/pkg/sql/catalog/hydratedtables/hydratedcache.go @@ -223,7 +223,7 @@ func (c *Cache) GetHydratedTableDescriptor( if err := typedesc.HydrateTypesInTableDescriptor(ctx, descBase, &cachedRes); err != nil { return nil, err } - hydrated := tabledesc.NewImmutable(*descBase) + hydrated := tabledesc.NewBuilder(descBase).BuildImmutableTable() // If any of the types resolved as part of hydration are modified, skip // writing this descriptor to the cache. diff --git a/pkg/sql/catalog/hydratedtables/hydratedcache_test.go b/pkg/sql/catalog/hydratedtables/hydratedcache_test.go index a40409830048..70bba1739143 100644 --- a/pkg/sql/catalog/hydratedtables/hydratedcache_test.go +++ b/pkg/sql/catalog/hydratedtables/hydratedcache_test.go @@ -84,7 +84,7 @@ func TestHydratedCache(t *testing.T) { assertMetrics(t, m, 0, 1) // Change the database name. - dbDesc := dbdesc.NewExistingMutable(*dg[dbID].(*dbdesc.Immutable).DatabaseDesc()) + dbDesc := dbdesc.NewBuilder(dg[dbID].(catalog.DatabaseDescriptor).DatabaseDesc()).BuildExistingMutableDatabase() dbDesc.SetName("new_name") dbDesc.Version++ dg[dbID] = dbDesc.ImmutableCopy() @@ -152,9 +152,9 @@ func TestHydratedCache(t *testing.T) { assertMetrics(t, m, 0, 1) // Change the type descriptor. - typDesc := typedesc.NewExistingMutable(*dg[typ1ID].(*typedesc.Immutable).TypeDesc()) + typDesc := typedesc.NewBuilder(dg[typ1ID].(catalog.TypeDescriptor).TypeDesc()).BuildExistingMutableType() typDesc.Version++ - dg[typ1ID] = typedesc.NewImmutable(*typDesc.TypeDesc()) + dg[typ1ID] = typedesc.NewBuilder(typDesc.TypeDesc()).BuildImmutable() // Ensure that a new descriptor is returned. retrieved, err := c.GetHydratedTableDescriptor(ctx, td, res) @@ -200,7 +200,7 @@ func TestHydratedCache(t *testing.T) { c := NewCache(cluster.MakeTestingClusterSettings()) dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} - mut := tabledesc.NewExistingMutable(*dg[tableUDTID].(catalog.TableDescriptor).TableDesc()) + mut := tabledesc.NewBuilder(dg[tableUDTID].(catalog.TableDescriptor).TableDesc()).BuildExistingMutable() mut.MaybeIncrementVersion() td := mut.ImmutableCopy().(catalog.TableDescriptor) hydrated, err := c.GetHydratedTableDescriptor(ctx, td, res) @@ -214,7 +214,7 @@ func TestHydratedCache(t *testing.T) { dg := mkDescGetter(descs...) res := &descGetterTypeDescriptorResolver{dg: &dg} - mut := typedesc.NewExistingMutable(*dg[typ1ID].(catalog.TypeDescriptor).TypeDesc()) + mut := typedesc.NewBuilder(dg[typ1ID].(catalog.TypeDescriptor).TypeDesc()).BuildExistingMutable() mut.MaybeIncrementVersion() dgWithMut := mkDescGetter(append(descs, mut)...) resWithMut := &descGetterTypeDescriptorResolver{dg: &dgWithMut} @@ -289,11 +289,11 @@ const ( // server. 
var ( dbDesc = dbdesc.NewInitial(dbID, "db", security.RootUserName()) - schemaDesc = schemadesc.NewCreatedMutable(descpb.SchemaDescriptor{ + schemaDesc = schemadesc.NewBuilder(&descpb.SchemaDescriptor{ Name: "schema", ID: scID, ParentID: dbID, - }) + }).BuildCreatedMutable() enumMembers = []descpb.TypeDescriptor_EnumMember{ { LogicalRepresentation: "hello", @@ -305,7 +305,7 @@ var ( }, } - typ1Desc = typedesc.NewExistingMutable(descpb.TypeDescriptor{ + typ1Desc = typedesc.NewBuilder(&descpb.TypeDescriptor{ Name: "enum", ID: typ1ID, Version: 1, @@ -315,12 +315,12 @@ var ( Kind: descpb.TypeDescriptor_ENUM, ReferencingDescriptorIDs: []descpb.ID{tableUDTID}, EnumMembers: enumMembers, - }) - typ1Name = tree.MakeNewQualifiedTypeName(dbDesc.Name, schemaDesc.Name, typ1Desc.Name) + }).BuildExistingMutableType() + typ1Name = tree.MakeNewQualifiedTypeName(dbDesc.Name, schemaDesc.GetName(), typ1Desc.Name) typ1T = mkTypeT(typ1Desc, &typ1Name) typ1TSerialized = &types.T{InternalType: typ1T.InternalType} - typ2Desc = typedesc.NewExistingMutable(descpb.TypeDescriptor{ + typ2Desc = typedesc.NewBuilder(&descpb.TypeDescriptor{ Name: "other_enum", ID: typ2ID, Version: 1, @@ -330,11 +330,11 @@ var ( Kind: descpb.TypeDescriptor_ENUM, ReferencingDescriptorIDs: []descpb.ID{tableUDTID}, EnumMembers: enumMembers, - }) - typ2Name = tree.MakeNewQualifiedTypeName(dbDesc.Name, schemaDesc.Name, typ2Desc.Name) + }).BuildExistingMutableType() + typ2Name = tree.MakeNewQualifiedTypeName(dbDesc.Name, schemaDesc.GetName(), typ2Desc.Name) typ2T = mkTypeT(typ2Desc, &typ2Name) typ2TSerialized = &types.T{InternalType: typ2T.InternalType} - tableDescUDT = tabledesc.NewExistingMutable(descpb.TableDescriptor{ + tableDescUDT = tabledesc.NewBuilder(&descpb.TableDescriptor{ Name: "foo", ID: tableUDTID, Version: 1, @@ -345,8 +345,8 @@ var ( {Name: "b", ID: 1, Type: typ2TSerialized}, {Name: "c", ID: 1, Type: typ1TSerialized}, }, - }) - tableDescNoUDT = tabledesc.NewExistingMutable(descpb.TableDescriptor{ + }).BuildExistingMutableTable() + tableDescNoUDT = tabledesc.NewBuilder(&descpb.TableDescriptor{ Name: "bar", ID: tableNoUDTID, Version: 1, @@ -355,7 +355,7 @@ var ( Columns: []descpb.ColumnDescriptor{ {Name: "a", ID: 1, Type: types.Int}, }, - }) + }).BuildExistingMutableTable() descs = []catalog.MutableDescriptor{ dbDesc, schemaDesc, typ1Desc, typ2Desc, tableDescUDT, tableDescNoUDT, } diff --git a/pkg/sql/catalog/lease/helpers_test.go b/pkg/sql/catalog/lease/helpers_test.go index 0cc2271f26c5..d98746ed21b0 100644 --- a/pkg/sql/catalog/lease/helpers_test.go +++ b/pkg/sql/catalog/lease/helpers_test.go @@ -165,14 +165,13 @@ func (m *Manager) PublishMultiple( for _, id := range ids { // Re-read the current versions of the descriptor, this time // transactionally. - desc, err := catalogkv.GetDescriptorByID(ctx, txn, m.storage.codec, id, catalogkv.Mutable, - catalogkv.AnyDescriptorKind, true /* required */) + desc, err := catalogkv.MustGetMutableDescriptorByID(ctx, txn, m.storage.codec, id) // Due to details in #51417, it is possible for a user to request a // descriptor which no longer exists. In that case, just return an error. if err != nil { return err } - descsToUpdate[id] = desc.(catalog.MutableDescriptor) + descsToUpdate[id] = desc if expectedVersions[id] != desc.GetVersion() { // The version changed out from under us. Someone else must be // performing a schema change operation. 
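Editor's note: the catalogkv call sites below all take the same shape — the Mutable/AnyDescriptorKind/required flags are folded into the helper's name. A minimal sketch of the new read path, assuming a hypothetical wrapper function readMutableDesc; only the MustGetMutableDescriptorByID call itself is taken from this change.

package example // illustrative only; not part of the change

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
)

// readMutableDesc reads a mutable descriptor by ID inside a transaction.
func readMutableDesc(
	ctx context.Context, db *kv.DB, codec keys.SQLCodec, id descpb.ID,
) (catalog.MutableDescriptor, error) {
	var desc catalog.MutableDescriptor
	if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) {
		// Replaces catalogkv.GetDescriptorByID(ctx, txn, codec, id,
		// catalogkv.Mutable, catalogkv.AnyDescriptorKind, true /* required */).
		desc, err = catalogkv.MustGetMutableDescriptorByID(ctx, txn, codec, id)
		return err
	}); err != nil {
		return nil, err
	}
	return desc, nil
}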
diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index 985a9e0155fb..53ebf4eeb64b 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -232,8 +232,7 @@ func (s storage) acquire( // to ValidateSelf() instead of Validate(), to avoid the cross-table // checks. Does this actually matter? We already potentially do cross-table // checks when populating pre-19.2 foreign keys. - desc, err := catalogkv.GetDescriptorByID(ctx, txn, s.codec, id, catalogkv.Immutable, - catalogkv.AnyDescriptorKind, true /* required */) + desc, err := catalogkv.MustGetDescriptorByID(ctx, txn, s.codec, id) if err != nil { return err } @@ -345,8 +344,7 @@ func (m *Manager) WaitForOneVersion( for lastCount, r := 0, retry.Start(retryOpts); r.Next(); { var desc catalog.Descriptor if err := m.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - desc, err = catalogkv.GetDescriptorByID(ctx, txn, m.Codec(), id, catalogkv.Immutable, - catalogkv.AnyDescriptorKind, true /* required */) + desc, err = catalogkv.MustGetDescriptorByID(ctx, txn, m.Codec(), id) return err }); err != nil { return 0, err @@ -433,8 +431,7 @@ func (s storage) getForExpiration( err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { prevTimestamp := expiration.Prev() txn.SetFixedTimestamp(ctx, prevTimestamp) - desc, err := catalogkv.GetDescriptorByID(ctx, txn, s.codec, id, catalogkv.Immutable, - catalogkv.AnyDescriptorKind, true /* required */) + desc, err := catalogkv.MustGetDescriptorByID(ctx, txn, s.codec, id) if err != nil { return err } @@ -1859,7 +1856,7 @@ func (m *Manager) watchForRangefeedUpdates( if descriptor.Union == nil { return } - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, &descriptor, ev.Value.Timestamp) + descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(&descriptor, ev.Value.Timestamp) id, version, name, _ := descpb.GetDescriptorMetadata(&descriptor) if log.V(2) { log.Infof(ctx, "%s: refreshing lease on descriptor: %d (%s), version: %d", @@ -1901,7 +1898,7 @@ func (m *Manager) handleUpdatedSystemCfg( if descriptor.Union == nil { return } - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, &descriptor, kv.Value.Timestamp) + descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(&descriptor, kv.Value.Timestamp) id, version, name, _ := descpb.GetDescriptorMetadata(&descriptor) if log.V(2) { log.Infof(ctx, "%s: refreshing lease on descriptor: %d (%s), version: %d", diff --git a/pkg/sql/catalog/lease/lease_internal_test.go b/pkg/sql/catalog/lease/lease_internal_test.go index 83602a64c85d..e5225abc5d0d 100644 --- a/pkg/sql/catalog/lease/lease_internal_test.go +++ b/pkg/sql/catalog/lease/lease_internal_test.go @@ -85,18 +85,14 @@ func TestTableSet(t *testing.T) { switch op := d.op.(type) { case insert: s := &descriptorVersionState{ - Descriptor: tabledesc.NewImmutable( - descpb.TableDescriptor{Version: op.version}, - ), + Descriptor: tabledesc.NewBuilder(&descpb.TableDescriptor{Version: op.version}).BuildImmutable(), } s.expiration = hlc.Timestamp{WallTime: op.expiration} set.insert(s) case remove: s := &descriptorVersionState{ - Descriptor: tabledesc.NewImmutable( - descpb.TableDescriptor{Version: op.version}, - ), + Descriptor: tabledesc.NewBuilder(&descpb.TableDescriptor{Version: op.version}).BuildImmutable(), } s.expiration = hlc.Timestamp{WallTime: op.expiration} set.remove(s) diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index 1bf2222c2bc6..a7fd780f98bc 100644 --- 
a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -1694,7 +1694,7 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); t.Fatalf("error while reading proto: %v", err) } // Look at the descriptor that comes back from the database. - dbTable := descpb.TableFromDescriptor(dbDesc, ts) + dbTable, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(dbDesc, ts) if dbTable.Version != table.GetVersion() || dbTable.ModificationTime != table.GetModificationTime() { t.Fatalf("db has version %d at ts %s, expected version %d at ts %s", @@ -2323,8 +2323,8 @@ func TestLeaseWithOfflineTables(t *testing.T) { var lmKnobs lease.ManagerTestingKnobs blockDescRefreshed := make(chan struct{}, 1) lmKnobs.TestingDescriptorRefreshedEvent = func(desc *descpb.Descriptor) { - t := descpb.TableFromDescriptor(desc, hlc.Timestamp{}) - if t != nil && testTableID() == t.ID { + tbl, _, _, _ := descpb.FromDescriptor(desc) + if tbl != nil && testTableID() == tbl.ID { blockDescRefreshed <- struct{}{} } } diff --git a/pkg/sql/catalog/schemadesc/BUILD.bazel b/pkg/sql/catalog/schemadesc/BUILD.bazel index b85ff078598b..631654726544 100644 --- a/pkg/sql/catalog/schemadesc/BUILD.bazel +++ b/pkg/sql/catalog/schemadesc/BUILD.bazel @@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "schemadesc", - srcs = ["schema_desc.go"], + srcs = [ + "schema_desc.go", + "schema_desc_builder.go", + ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc", visibility = ["//visibility:public"], deps = [ diff --git a/pkg/sql/catalog/schemadesc/schema_desc.go b/pkg/sql/catalog/schemadesc/schema_desc.go index 7762092928ff..9206ec2a1c7b 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc.go +++ b/pkg/sql/catalog/schemadesc/schema_desc.go @@ -22,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/cockroachdb/redact" ) @@ -74,40 +73,6 @@ type Mutable struct { var _ redact.SafeMessager = (*Immutable)(nil) -// NewMutableExisting returns a Mutable from the -// given schema descriptor with the cluster version also set to the descriptor. -// This is for schemas that already exist. -func NewMutableExisting(desc descpb.SchemaDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(*protoutil.Clone(&desc).(*descpb.SchemaDescriptor)), - ClusterVersion: NewImmutable(desc), - } -} - -// NewImmutable makes a new Schema descriptor. -func NewImmutable(desc descpb.SchemaDescriptor) *Immutable { - m := makeImmutable(desc) - return &m -} - -func makeImmutable(desc descpb.SchemaDescriptor) Immutable { - return Immutable{SchemaDescriptor: desc} -} - -// Reference these functions to defeat the linter. -var ( - _ = NewImmutable -) - -// NewCreatedMutable returns a Mutable from the -// given SchemaDescriptor with the cluster version being the zero schema. This -// is for a schema that is created within the current transaction. -func NewCreatedMutable(desc descpb.SchemaDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(desc), - } -} - // SetDrainingNames implements the MutableDescriptor interface. 
func (desc *Mutable) SetDrainingNames(names []descpb.NameInfo) { desc.DrainingNames = names @@ -128,9 +93,9 @@ func (desc *Immutable) GetAuditMode() descpb.TableDescriptor_AuditMode { return descpb.TableDescriptor_DISABLED } -// TypeName implements the DescriptorProto interface. -func (desc *Immutable) TypeName() string { - return "schema" +// DescriptorType implements the DescriptorProto interface. +func (desc *Immutable) DescriptorType() catalog.DescriptorType { + return catalog.Schema } // SchemaDesc implements the Descriptor interface. @@ -273,10 +238,8 @@ func (desc *Mutable) OriginalVersion() descpb.DescriptorVersion { // ImmutableCopy implements the MutableDescriptor interface. func (desc *Mutable) ImmutableCopy() catalog.Descriptor { - // TODO (lucy): Should the immutable descriptor constructors always make a - // copy, so we don't have to do it here? - imm := NewImmutable(*protoutil.Clone(desc.SchemaDesc()).(*descpb.SchemaDescriptor)) - imm.isUncommittedVersion = desc.IsUncommittedVersion() + imm := NewBuilder(desc.SchemaDesc()).BuildImmutable() + imm.(*Immutable).isUncommittedVersion = desc.IsUncommittedVersion() return imm } diff --git a/pkg/sql/catalog/schemadesc/schema_desc_builder.go b/pkg/sql/catalog/schemadesc/schema_desc_builder.go new file mode 100644 index 000000000000..069e517c5206 --- /dev/null +++ b/pkg/sql/catalog/schemadesc/schema_desc_builder.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schemadesc + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" +) + +// SchemaDescriptorBuilder is an extension of catalog.DescriptorBuilder +// for schema descriptors. +type SchemaDescriptorBuilder interface { + catalog.DescriptorBuilder + BuildImmutableSchema() *Immutable + BuildExistingMutableSchema() *Mutable + BuildCreatedMutableSchema() *Mutable +} + +type schemaDescriptorBuilder struct { + original *descpb.SchemaDescriptor +} + +var _ SchemaDescriptorBuilder = &schemaDescriptorBuilder{} + +// NewBuilder creates a new catalog.DescriptorBuilder object for building +// schema descriptors. +func NewBuilder(desc *descpb.SchemaDescriptor) SchemaDescriptorBuilder { + return &schemaDescriptorBuilder{ + original: protoutil.Clone(desc).(*descpb.SchemaDescriptor), + } +} + +// DescriptorType implements the catalog.DescriptorBuilder interface. +func (sdb *schemaDescriptorBuilder) DescriptorType() catalog.DescriptorType { + return catalog.Schema +} + +// RunPostDeserializationChanges implements the catalog.DescriptorBuilder +// interface. +func (sdb *schemaDescriptorBuilder) RunPostDeserializationChanges( + _ context.Context, _ catalog.DescGetter, +) error { + return nil +} + +// BuildImmutable implements the catalog.DescriptorBuilder interface. +func (sdb *schemaDescriptorBuilder) BuildImmutable() catalog.Descriptor { + return sdb.BuildImmutableSchema() +} + +// BuildImmutableSchema returns an immutable schema descriptor. 
+func (sdb *schemaDescriptorBuilder) BuildImmutableSchema() *Immutable { + return &Immutable{SchemaDescriptor: *sdb.original} +} + +// BuildExistingMutable implements the catalog.DescriptorBuilder interface. +func (sdb *schemaDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor { + return sdb.BuildExistingMutableSchema() +} + +// BuildExistingMutableSchema returns a mutable descriptor for a schema +// which already exists. +func (sdb *schemaDescriptorBuilder) BuildExistingMutableSchema() *Mutable { + desc := protoutil.Clone(sdb.original).(*descpb.SchemaDescriptor) + return &Mutable{ + Immutable: Immutable{SchemaDescriptor: *desc}, + ClusterVersion: &Immutable{SchemaDescriptor: *sdb.original}, + } +} + +// BuildCreatedMutable implements the catalog.DescriptorBuilder interface. +func (sdb *schemaDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor { + return sdb.BuildCreatedMutableSchema() +} + +// BuildCreatedMutableSchema returns a mutable descriptor for a schema +// which is in the process of being created. +func (sdb *schemaDescriptorBuilder) BuildCreatedMutableSchema() *Mutable { + return &Mutable{Immutable: Immutable{SchemaDescriptor: *sdb.original}} +} diff --git a/pkg/sql/catalog/schemadesc/schema_desc_test.go b/pkg/sql/catalog/schemadesc/schema_desc_test.go index 1987ea17ddc8..41516df4a478 100644 --- a/pkg/sql/catalog/schemadesc/schema_desc_test.go +++ b/pkg/sql/catalog/schemadesc/schema_desc_test.go @@ -28,27 +28,27 @@ import ( func TestSafeMessage(t *testing.T) { for _, tc := range []struct { - desc catalog.SchemaDescriptor + desc catalog.Descriptor exp string }{ { - desc: schemadesc.NewImmutable(descpb.SchemaDescriptor{ + desc: schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ID: 12, Version: 1, ParentID: 2, State: descpb.DescriptorState_OFFLINE, OfflineReason: "foo", - }), + }).BuildImmutable(), exp: "schemadesc.Immutable: {ID: 12, Version: 1, ModificationTime: \"0,0\", ParentID: 2, State: OFFLINE, OfflineReason: \"foo\"}", }, { - desc: schemadesc.NewCreatedMutable(descpb.SchemaDescriptor{ + desc: schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ID: 42, Version: 1, ParentID: 2, State: descpb.DescriptorState_OFFLINE, OfflineReason: "bar", - }), + }).BuildCreatedMutable(), exp: "schemadesc.Mutable: {ID: 42, Version: 1, IsUncommitted: true, ModificationTime: \"0,0\", ParentID: 2, State: OFFLINE, OfflineReason: \"bar\"}", }, } { @@ -166,11 +166,11 @@ func TestValidateCrossSchemaReferences(t *testing.T) { privilege := descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) descs := catalog.MapDescGetter{} test.desc.Privileges = privilege - desc := schemadesc.NewImmutable(test.desc) + desc := schemadesc.NewBuilder(&test.desc).BuildImmutable() descs[test.desc.ID] = desc test.dbDesc.Privileges = privilege - descs[test.dbDesc.ID] = dbdesc.NewImmutable(test.dbDesc) - expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + descs[test.dbDesc.ID] = dbdesc.NewBuilder(&test.dbDesc).BuildImmutable() + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { if test.err != "" { diff --git a/pkg/sql/catalog/schemaexpr/testutils_test.go b/pkg/sql/catalog/schemaexpr/testutils_test.go index 2ef482edec73..e2bceaea9223 100644 --- 
a/pkg/sql/catalog/schemaexpr/testutils_test.go +++ b/pkg/sql/catalog/schemaexpr/testutils_test.go @@ -52,10 +52,10 @@ func testTableDesc( Direction: descpb.DescriptorMutation_ADD, } } - return tabledesc.NewImmutable(descpb.TableDescriptor{ + return tabledesc.NewBuilder(&descpb.TableDescriptor{ Name: name, ID: 1, Columns: cols, Mutations: muts, - }) + }).BuildImmutableTable() } diff --git a/pkg/sql/catalog/systemschema/BUILD.bazel b/pkg/sql/catalog/systemschema/BUILD.bazel index f60a315dd431..61fe32723457 100644 --- a/pkg/sql/catalog/systemschema/BUILD.bazel +++ b/pkg/sql/catalog/systemschema/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "//pkg/keys", "//pkg/roachpb", "//pkg/security", + "//pkg/sql/catalog", "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", diff --git a/pkg/sql/catalog/systemschema/system.go b/pkg/sql/catalog/systemschema/system.go index e8a82fc529f0..2d4adf3e4674 100644 --- a/pkg/sql/catalog/systemschema/system.go +++ b/pkg/sql/catalog/systemschema/system.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -381,15 +382,19 @@ const SystemDatabaseName = "system" // MakeSystemDatabaseDesc constructs a copy of the system database // descriptor. -func MakeSystemDatabaseDesc() *dbdesc.Immutable { - return dbdesc.NewImmutable(descpb.DatabaseDescriptor{ +func MakeSystemDatabaseDesc() catalog.DatabaseDescriptor { + return dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ Name: SystemDatabaseName, ID: keys.SystemDatabaseID, Version: 1, // Assign max privileges to root user. Privileges: descpb.NewCustomSuperuserPrivilegeDescriptor( descpb.SystemAllowedPrivileges[keys.SystemDatabaseID], security.NodeUserName()), - }) + }).BuildImmutableDatabase() +} + +func makeTable(desc descpb.TableDescriptor) catalog.TableDescriptor { + return tabledesc.NewBuilder(&desc).BuildImmutableTable() } // These system config descpb.TableDescriptor literals should match the descriptor @@ -406,7 +411,7 @@ var ( NamespaceTableName = "namespace" // DeprecatedNamespaceTable is the descriptor for the deprecated namespace table. - DeprecatedNamespaceTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + DeprecatedNamespaceTable = makeTable(descpb.TableDescriptor{ Name: NamespaceTableName, ID: keys.DeprecatedNamespaceTableID, ParentID: keys.SystemDatabaseID, @@ -451,7 +456,7 @@ var ( // // TODO(solon): in 20.2, we should change the Name of this descriptor // back to "namespace". - NamespaceTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + NamespaceTable = makeTable(descpb.TableDescriptor{ Name: "namespace2", ID: keys.NamespaceTableID, ParentID: keys.SystemDatabaseID, @@ -487,7 +492,7 @@ var ( }) // DescriptorTable is the descriptor for the descriptor table. - DescriptorTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + DescriptorTable = makeTable(descpb.TableDescriptor{ Name: "descriptor", ID: keys.DescriptorTableID, Privileges: descpb.NewCustomSuperuserPrivilegeDescriptor( @@ -521,7 +526,7 @@ var ( trueBoolString = "true" // UsersTable is the descriptor for the users table. 
- UsersTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + UsersTable = makeTable(descpb.TableDescriptor{ Name: "users", ID: keys.UsersTableID, ParentID: keys.SystemDatabaseID, @@ -548,7 +553,7 @@ var ( }) // ZonesTable is the descriptor for the zones table. - ZonesTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ZonesTable = makeTable(descpb.TableDescriptor{ Name: "zones", ID: keys.ZonesTableID, ParentID: keys.SystemDatabaseID, @@ -583,7 +588,7 @@ var ( // SettingsTable is the descriptor for the settings table. // It contains all cluster settings for which a value has been set. - SettingsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + SettingsTable = makeTable(descpb.TableDescriptor{ Name: "settings", ID: keys.SettingsTableID, ParentID: keys.SystemDatabaseID, @@ -614,7 +619,7 @@ var ( }) // DescIDSequence is the descriptor for the descriptor ID sequence. - DescIDSequence = tabledesc.NewImmutable(descpb.TableDescriptor{ + DescIDSequence = makeTable(descpb.TableDescriptor{ Name: "descriptor_id_seq", ID: keys.DescIDSequenceID, ParentID: keys.SystemDatabaseID, @@ -649,7 +654,7 @@ var ( FormatVersion: descpb.InterleavedFormatVersion, }) - TenantsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + TenantsTable = makeTable(descpb.TableDescriptor{ Name: "tenants", ID: keys.TenantsTableID, ParentID: keys.SystemDatabaseID, @@ -689,7 +694,7 @@ var ( // suggestions on writing and maintaining them. var ( // LeaseTable is the descriptor for the leases table. - LeaseTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + LeaseTable = makeTable(descpb.TableDescriptor{ Name: "lease", ID: keys.LeaseTableID, ParentID: keys.SystemDatabaseID, @@ -725,7 +730,7 @@ var ( uuidV4String = "uuid_v4()" // EventLogTable is the descriptor for the event log table. - EventLogTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + EventLogTable = makeTable(descpb.TableDescriptor{ Name: "eventlog", ID: keys.EventLogTableID, ParentID: keys.SystemDatabaseID, @@ -770,7 +775,7 @@ var ( uniqueRowIDString = "unique_rowid()" // RangeEventTable is the descriptor for the range log table. - RangeEventTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + RangeEventTable = makeTable(descpb.TableDescriptor{ Name: "rangelog", ID: keys.RangeEventTableID, ParentID: keys.SystemDatabaseID, @@ -812,7 +817,7 @@ var ( }) // UITable is the descriptor for the ui table. - UITable = tabledesc.NewImmutable(descpb.TableDescriptor{ + UITable = makeTable(descpb.TableDescriptor{ Name: "ui", ID: keys.UITableID, ParentID: keys.SystemDatabaseID, @@ -842,7 +847,7 @@ var ( nowTZString = "now():::TIMESTAMPTZ" // JobsTable is the descriptor for the jobs table. - JobsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + JobsTable = makeTable(descpb.TableDescriptor{ Name: "jobs", ID: keys.JobsTableID, ParentID: keys.SystemDatabaseID, @@ -918,7 +923,7 @@ var ( }) // WebSessions table to authenticate sessions over stateless connections. - WebSessionsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + WebSessionsTable = makeTable(descpb.TableDescriptor{ Name: "web_sessions", ID: keys.WebSessionsTableID, ParentID: keys.SystemDatabaseID, @@ -984,7 +989,7 @@ var ( }) // TableStatistics table to hold statistics about columns and column groups. 
- TableStatisticsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + TableStatisticsTable = makeTable(descpb.TableDescriptor{ Name: "table_statistics", ID: keys.TableStatisticsTableID, ParentID: keys.SystemDatabaseID, @@ -1040,7 +1045,7 @@ var ( latLonDecimal = types.MakeDecimal(18, 15) // LocationsTable is the descriptor for the locations table. - LocationsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + LocationsTable = makeTable(descpb.TableDescriptor{ Name: "locations", ID: keys.LocationsTableID, ParentID: keys.SystemDatabaseID, @@ -1079,7 +1084,7 @@ var ( }) // RoleMembersTable is the descriptor for the role_members table. - RoleMembersTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + RoleMembersTable = makeTable(descpb.TableDescriptor{ Name: "role_members", ID: keys.RoleMembersTableID, ParentID: keys.SystemDatabaseID, @@ -1146,7 +1151,7 @@ var ( }) // CommentsTable is the descriptor for the comments table. - CommentsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + CommentsTable = makeTable(descpb.TableDescriptor{ Name: "comments", ID: keys.CommentsTableID, ParentID: keys.SystemDatabaseID, @@ -1180,7 +1185,7 @@ var ( NextMutationID: 1, }) - ReportsMetaTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ReportsMetaTable = makeTable(descpb.TableDescriptor{ Name: "reports_meta", ID: keys.ReportsMetaTableID, ParentID: keys.SystemDatabaseID, @@ -1222,7 +1227,7 @@ var ( // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. - ReplicationConstraintStatsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ReplicationConstraintStatsTable = makeTable(descpb.TableDescriptor{ Name: "replication_constraint_stats", ID: keys.ReplicationConstraintStatsTableID, ParentID: keys.SystemDatabaseID, @@ -1276,7 +1281,7 @@ var ( // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. - ReplicationCriticalLocalitiesTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ReplicationCriticalLocalitiesTable = makeTable(descpb.TableDescriptor{ Name: "replication_critical_localities", ID: keys.ReplicationCriticalLocalitiesTableID, ParentID: keys.SystemDatabaseID, @@ -1327,7 +1332,7 @@ var ( // TODO(andrei): In 20.1 we should add a foreign key reference to the // reports_meta table. Until then, it would cost us having to create an index // on report_id. 
- ReplicationStatsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ReplicationStatsTable = makeTable(descpb.TableDescriptor{ Name: "replication_stats", ID: keys.ReplicationStatsTableID, ParentID: keys.SystemDatabaseID, @@ -1376,7 +1381,7 @@ var ( NextMutationID: 1, }) - ProtectedTimestampsMetaTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ProtectedTimestampsMetaTable = makeTable(descpb.TableDescriptor{ Name: "protected_ts_meta", ID: keys.ProtectedTimestampsMetaTableID, ParentID: keys.SystemDatabaseID, @@ -1423,12 +1428,12 @@ var ( }, NextIndexID: 2, Privileges: descpb.NewCustomSuperuserPrivilegeDescriptor( - descpb.SystemAllowedPrivileges[keys.ReplicationStatsTableID], security.NodeUserName()), + descpb.SystemAllowedPrivileges[keys.ProtectedTimestampsMetaTableID], security.NodeUserName()), FormatVersion: descpb.InterleavedFormatVersion, NextMutationID: 1, }) - ProtectedTimestampsRecordsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ProtectedTimestampsRecordsTable = makeTable(descpb.TableDescriptor{ Name: "protected_ts_records", ID: keys.ProtectedTimestampsRecordsTableID, ParentID: keys.SystemDatabaseID, @@ -1471,7 +1476,7 @@ var ( }) // RoleOptionsTable is the descriptor for the role_options table. - RoleOptionsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + RoleOptionsTable = makeTable(descpb.TableDescriptor{ Name: "role_options", ID: keys.RoleOptionsTableID, ParentID: keys.SystemDatabaseID, @@ -1508,7 +1513,7 @@ var ( NextMutationID: 1, }) - StatementBundleChunksTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + StatementBundleChunksTable = makeTable(descpb.TableDescriptor{ Name: "statement_bundle_chunks", ID: keys.StatementBundleChunksTableID, ParentID: keys.SystemDatabaseID, @@ -1538,7 +1543,7 @@ var ( // TODO(andrei): Add a foreign key reference to the statement_diagnostics table when // it no longer requires us to create an index on statement_diagnostics_id. - StatementDiagnosticsRequestsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + StatementDiagnosticsRequestsTable = makeTable(descpb.TableDescriptor{ Name: "statement_diagnostics_requests", ID: keys.StatementDiagnosticsRequestsTableID, ParentID: keys.SystemDatabaseID, @@ -1582,7 +1587,7 @@ var ( NextMutationID: 1, }) - StatementDiagnosticsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + StatementDiagnosticsTable = makeTable(descpb.TableDescriptor{ Name: "statement_diagnostics", ID: keys.StatementDiagnosticsTableID, ParentID: keys.SystemDatabaseID, @@ -1616,7 +1621,7 @@ var ( }) // ScheduledJobsTable is the descriptor for the scheduled jobs table. - ScheduledJobsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + ScheduledJobsTable = makeTable(descpb.TableDescriptor{ Name: "scheduled_jobs", ID: keys.ScheduledJobsTableID, ParentID: keys.SystemDatabaseID, @@ -1674,7 +1679,7 @@ var ( }) // SqllivenessTable is the descriptor for the sqlliveness table. - SqllivenessTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + SqllivenessTable = makeTable(descpb.TableDescriptor{ Name: "sqlliveness", ID: keys.SqllivenessID, ParentID: keys.SystemDatabaseID, @@ -1706,7 +1711,7 @@ var ( // MigrationsTable is the descriptor for the migrations table. It stores facts // about the completion state of long-running migrations. It is used to // prevent migrations from running again after they have been completed. 
- MigrationsTable = tabledesc.NewImmutable(descpb.TableDescriptor{ + MigrationsTable = makeTable(descpb.TableDescriptor{ Name: "migrations", ID: keys.MigrationsID, ParentID: keys.SystemDatabaseID, diff --git a/pkg/sql/catalog/systemschema/system_test.go b/pkg/sql/catalog/systemschema/system_test.go index d577e09363e6..3ef05a9b6555 100644 --- a/pkg/sql/catalog/systemschema/system_test.go +++ b/pkg/sql/catalog/systemschema/system_test.go @@ -25,13 +25,18 @@ import ( ) func TestShouldSplitAtDesc(t *testing.T) { + tbl1 := descpb.TableDescriptor{} + tbl2 := descpb.TableDescriptor{ViewQuery: "SELECT"} + tbl3 := descpb.TableDescriptor{ViewQuery: "SELECT", IsMaterializedView: true} + typ := descpb.TypeDescriptor{} + schema := descpb.SchemaDescriptor{} for inner, should := range map[catalog.Descriptor]bool{ - tabledesc.NewImmutable(descpb.TableDescriptor{}): true, - tabledesc.NewImmutable(descpb.TableDescriptor{ViewQuery: "SELECT"}): false, - tabledesc.NewImmutable(descpb.TableDescriptor{ViewQuery: "SELECT", IsMaterializedView: true}): true, - dbdesc.NewInitial(42, "db", security.AdminRoleName()): false, - typedesc.NewCreatedMutable(descpb.TypeDescriptor{}): false, - schemadesc.NewImmutable(descpb.SchemaDescriptor{}): false, + tabledesc.NewBuilder(&tbl1).BuildImmutable(): true, + tabledesc.NewBuilder(&tbl2).BuildImmutable(): false, + tabledesc.NewBuilder(&tbl3).BuildImmutable(): true, + dbdesc.NewInitial(42, "db", security.AdminRoleName()): false, + typedesc.NewBuilder(&typ).BuildCreatedMutable(): false, + schemadesc.NewBuilder(&schema).BuildImmutable(): false, } { var rawDesc roachpb.Value require.NoError(t, rawDesc.SetProto(inner.DescriptorProto())) diff --git a/pkg/sql/catalog/tabledesc/BUILD.bazel b/pkg/sql/catalog/tabledesc/BUILD.bazel index 484565973824..3401be6dd210 100644 --- a/pkg/sql/catalog/tabledesc/BUILD.bazel +++ b/pkg/sql/catalog/tabledesc/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "structured.go", "table.go", "table_desc.go", + "table_desc_builder.go", "validate.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc", diff --git a/pkg/sql/catalog/tabledesc/structured.go b/pkg/sql/catalog/tabledesc/structured.go index bd2848667ee2..f5bd5e337959 100644 --- a/pkg/sql/catalog/tabledesc/structured.go +++ b/pkg/sql/catalog/tabledesc/structured.go @@ -67,93 +67,6 @@ var ErrMissingPrimaryKey = errors.New("table must contain a primary key") // collected mutations list. var ErrIndexGCMutationsList = errors.New("index in GC mutations list") -// NewCreatedMutable returns a Mutable from the -// given TableDescriptor with the cluster version being the zero table. This -// is for a table that is created in the transaction. -func NewCreatedMutable(tbl descpb.TableDescriptor) *Mutable { - m, _ := NewFilledInExistingMutable(context.TODO(), nil /* DescGetter */, false /* skipFKsWithMissingTable */, &tbl) - return &Mutable{wrapper: m.wrapper} -} - -// NewExistingMutable returns a Mutable from the -// given TableDescriptor with the cluster version also set to the descriptor. -// This is for an existing table. -func NewExistingMutable(tbl descpb.TableDescriptor) *Mutable { - return &Mutable{ - wrapper: wrapper{TableDescriptor: tbl}, - ClusterVersion: tbl, - } -} - -// NewFilledInExistingMutable will construct a Mutable and potentially perform -// post-serialization upgrades. -// -// If skipFKsWithMissingTable is true, the foreign key representation upgrade -// may not fully complete if the other table cannot be found in the ProtoGetter -// but no error will be returned. 
-func NewFilledInExistingMutable( - ctx context.Context, - dg catalog.DescGetter, - skipFKsWithMissingTable bool, - tbl *descpb.TableDescriptor, -) (*Mutable, error) { - changes, err := maybeFillInDescriptor(ctx, dg, tbl, skipFKsWithMissingTable) - if err != nil { - return nil, err - } - w := wrapper{TableDescriptor: *tbl, postDeserializationChanges: changes} - return &Mutable{wrapper: w, ClusterVersion: *tbl}, nil -} - -// makeImmutable returns an immutable from the given TableDescriptor. -func makeImmutable(tbl descpb.TableDescriptor) immutable { - desc := immutable{wrapper: wrapper{ - TableDescriptor: tbl, - indexCache: newIndexCache(&tbl), - columnCache: newColumnCache(&tbl), - }} - - desc.allChecks = make([]descpb.TableDescriptor_CheckConstraint, len(tbl.Checks)) - for i, c := range tbl.Checks { - desc.allChecks[i] = *c - } - - return desc -} - -// NewImmutable returns a immutable from the given TableDescriptor. -// This function assumes that this descriptor has not been modified from the -// version stored in the key-value store. -func NewImmutable(tbl descpb.TableDescriptor) catalog.TableDescriptor { - return NewImmutableWithIsUncommittedVersion(tbl, false /* isUncommittedVersion */) -} - -// NewImmutableWithIsUncommittedVersion returns a immutable from the given -// TableDescriptor and allows the caller to mark the table as corresponding to -// an uncommitted version. This should be used when constructing a new copy of -// an immutable from an existing descriptor which may have a new version. -func NewImmutableWithIsUncommittedVersion( - tbl descpb.TableDescriptor, isUncommittedVersion bool, -) catalog.TableDescriptor { - desc := makeImmutable(tbl) - desc.isUncommittedVersion = isUncommittedVersion - return &desc -} - -// NewFilledInImmutable will construct an immutable and potentially perform -// post-deserialization upgrades. -func NewFilledInImmutable( - ctx context.Context, dg catalog.DescGetter, tbl *descpb.TableDescriptor, -) (catalog.TableDescriptor, error) { - changes, err := maybeFillInDescriptor(ctx, dg, tbl, false /* skipFKsWithNoMatchingTable */) - if err != nil { - return nil, err - } - desc := makeImmutable(*tbl) - desc.postDeserializationChanges = changes - return &desc, nil -} - // PostDeserializationTableDescriptorChanges are a set of booleans to indicate // which types of upgrades or fixes occurred when filling in the descriptor // after deserialization. @@ -180,9 +93,9 @@ func FindIndexPartitionByName( return desc.Partitioning.FindPartitionByName(name) } -// TypeName returns the plain type of this descriptor. -func (desc *wrapper) TypeName() string { - return "relation" +// DescriptorType returns the type of this descriptor. +func (desc *wrapper) DescriptorType() catalog.DescriptorType { + return catalog.Table } // SetName implements the DescriptorProto interface. @@ -410,32 +323,6 @@ func generatedFamilyName(familyID descpb.FamilyID, columnNames []string) string return buf.String() } -// maybeFillInDescriptor performs any modifications needed to the table descriptor. -// This includes format upgrades and optional changes that can be handled by all version -// (for example: additional default privileges). 
-func maybeFillInDescriptor( - ctx context.Context, - dg catalog.DescGetter, - desc *descpb.TableDescriptor, - skipFKsWithNoMatchingTable bool, -) (changes PostDeserializationTableDescriptorChanges, err error) { - changes.UpgradedFormatVersion = maybeUpgradeFormatVersion(desc) - - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - changes.FixedPrivileges = descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) - - if dg != nil { - changes.UpgradedForeignKeyRepresentation, err = maybeUpgradeForeignKeyRepresentation( - ctx, dg, skipFKsWithNoMatchingTable /* skipFKsWithNoMatchingTable*/, desc) - } - if err != nil { - return PostDeserializationTableDescriptorChanges{}, err - } - return changes, nil -} - func indexHasDeprecatedForeignKeyRepresentation(idx *descpb.IndexDescriptor) bool { return idx.ForeignKey.IsSet() || len(idx.ReferencedBy) > 0 } @@ -458,254 +345,6 @@ func TableHasDeprecatedForeignKeyRepresentation(desc *descpb.TableDescriptor) bo return false } -// maybeUpgradeForeignKeyRepresentation destructively modifies the input table -// descriptor by replacing all old-style foreign key references (the ForeignKey -// and ReferencedBy fields on IndexDescriptor) with new-style foreign key -// references (the InboundFKs and OutboundFKs fields on TableDescriptor). It -// uses the supplied proto getter to look up the referenced descriptor on -// outgoing FKs and the origin descriptor on incoming FKs. It returns true in -// the first position if the descriptor was upgraded at all (i.e. had old-style -// references on it) and an error if the descriptor was unable to be upgraded -// for some reason. -// If skipFKsWithNoMatchingTable is set to true, if a *table* that's supposed to -// contain the matching forward/back-reference for an FK is not found, the FK -// is dropped from the table and no error is returned. -// -// TODO(lucy): Write tests for when skipFKsWithNoMatchingTable is true. -// TODO(ajwerner): This exists solely for the purpose of front-loading upgrade -// at backup and restore time and occurs in a hacky way. All of that upgrading -// should get reworked but we're leaving this here for now for simplicity. -func maybeUpgradeForeignKeyRepresentation( - ctx context.Context, - dg catalog.DescGetter, - skipFKsWithNoMatchingTable bool, - desc *descpb.TableDescriptor, -) (bool, error) { - if desc.Dropped() { - // If the table has been dropped, it's permitted to have corrupted foreign - // keys, so we have no chance to properly upgrade it. Just return as-is. - return false, nil - } - otherUnupgradedTables := make(map[descpb.ID]catalog.TableDescriptor) - changed := false - // No need to process mutations, since only descriptors written on a 19.2 - // cluster (after finalizing the upgrade) have foreign key mutations. 
- for i := range desc.Indexes { - newChanged, err := maybeUpgradeForeignKeyRepOnIndex( - ctx, dg, otherUnupgradedTables, desc, &desc.Indexes[i], skipFKsWithNoMatchingTable, - ) - if err != nil { - return false, err - } - changed = changed || newChanged - } - newChanged, err := maybeUpgradeForeignKeyRepOnIndex( - ctx, dg, otherUnupgradedTables, desc, &desc.PrimaryIndex, skipFKsWithNoMatchingTable, - ) - if err != nil { - return false, err - } - changed = changed || newChanged - - return changed, nil -} - -// maybeUpgradeForeignKeyRepOnIndex is the meat of the previous function - it -// tries to upgrade a particular index's foreign key representation. -func maybeUpgradeForeignKeyRepOnIndex( - ctx context.Context, - dg catalog.DescGetter, - otherUnupgradedTables map[descpb.ID]catalog.TableDescriptor, - desc *descpb.TableDescriptor, - idx *descpb.IndexDescriptor, - skipFKsWithNoMatchingTable bool, -) (bool, error) { - var changed bool - if idx.ForeignKey.IsSet() { - ref := &idx.ForeignKey - if _, ok := otherUnupgradedTables[ref.Table]; !ok { - tbl, err := catalog.GetTableDescFromID(ctx, dg, ref.Table) - if err != nil { - if errors.Is(err, catalog.ErrDescriptorNotFound) && skipFKsWithNoMatchingTable { - // Ignore this FK and keep going. - } else { - return false, err - } - } else { - otherUnupgradedTables[ref.Table] = tbl - } - } - if tbl, ok := otherUnupgradedTables[ref.Table]; ok { - referencedIndex, err := tbl.FindIndexWithID(ref.Index) - if err != nil { - return false, err - } - numCols := ref.SharedPrefixLen - outFK := descpb.ForeignKeyConstraint{ - OriginTableID: desc.ID, - OriginColumnIDs: idx.ColumnIDs[:numCols], - ReferencedTableID: ref.Table, - ReferencedColumnIDs: referencedIndex.IndexDesc().ColumnIDs[:numCols], - Name: ref.Name, - Validity: ref.Validity, - OnDelete: ref.OnDelete, - OnUpdate: ref.OnUpdate, - Match: ref.Match, - } - desc.OutboundFKs = append(desc.OutboundFKs, outFK) - } - changed = true - idx.ForeignKey = descpb.ForeignKeyReference{} - } - - for refIdx := range idx.ReferencedBy { - ref := &(idx.ReferencedBy[refIdx]) - if _, ok := otherUnupgradedTables[ref.Table]; !ok { - tbl, err := catalog.GetTableDescFromID(ctx, dg, ref.Table) - if err != nil { - if errors.Is(err, catalog.ErrDescriptorNotFound) && skipFKsWithNoMatchingTable { - // Ignore this FK and keep going. - } else { - return false, err - } - } else { - otherUnupgradedTables[ref.Table] = tbl - } - } - - if otherTable, ok := otherUnupgradedTables[ref.Table]; ok { - originIndexI, err := otherTable.FindIndexWithID(ref.Index) - if err != nil { - return false, err - } - originIndex := originIndexI.IndexDesc() - // There are two cases. Either the other table is old (not upgraded yet), - // or it's new (already upgraded). - var inFK descpb.ForeignKeyConstraint - if !originIndex.ForeignKey.IsSet() { - // The other table has either no foreign key, indicating a corrupt - // reference, or the other table was upgraded. Assume the second for now. - // If we also find no matching reference in the new-style foreign keys, - // that indicates a corrupt reference. - var forwardFK *descpb.ForeignKeyConstraint - _ = otherTable.ForeachOutboundFK(func(otherFK *descpb.ForeignKeyConstraint) error { - if forwardFK != nil { - return nil - } - // To find a match, we find a foreign key reference that has the same - // referenced table ID, and that the index we point to is a valid - // index to satisfy the columns in the foreign key. 
- if otherFK.ReferencedTableID == desc.ID && - descpb.ColumnIDs(originIndex.ColumnIDs).HasPrefix(otherFK.OriginColumnIDs) { - // Found a match. - forwardFK = otherFK - } - return nil - }) - if forwardFK == nil { - // Corrupted foreign key - there was no forward reference for the back - // reference. - return false, errors.AssertionFailedf( - "error finding foreign key on table %d for backref %+v", - otherTable.GetID(), ref) - } - inFK = descpb.ForeignKeyConstraint{ - OriginTableID: ref.Table, - OriginColumnIDs: forwardFK.OriginColumnIDs, - ReferencedTableID: desc.ID, - ReferencedColumnIDs: forwardFK.ReferencedColumnIDs, - Name: forwardFK.Name, - Validity: forwardFK.Validity, - OnDelete: forwardFK.OnDelete, - OnUpdate: forwardFK.OnUpdate, - Match: forwardFK.Match, - } - } else { - // We have an old (not upgraded yet) table, with a matching forward - // foreign key. - numCols := originIndex.ForeignKey.SharedPrefixLen - inFK = descpb.ForeignKeyConstraint{ - OriginTableID: ref.Table, - OriginColumnIDs: originIndex.ColumnIDs[:numCols], - ReferencedTableID: desc.ID, - ReferencedColumnIDs: idx.ColumnIDs[:numCols], - Name: originIndex.ForeignKey.Name, - Validity: originIndex.ForeignKey.Validity, - OnDelete: originIndex.ForeignKey.OnDelete, - OnUpdate: originIndex.ForeignKey.OnUpdate, - Match: originIndex.ForeignKey.Match, - } - } - desc.InboundFKs = append(desc.InboundFKs, inFK) - } - changed = true - } - idx.ReferencedBy = nil - return changed, nil -} - -// maybeUpgradeFormatVersion transforms the TableDescriptor to the latest -// FormatVersion (if it's not already there) and returns true if any changes -// were made. -// This method should be called through maybeFillInDescriptor, not directly. -func maybeUpgradeFormatVersion(desc *descpb.TableDescriptor) bool { - if desc.FormatVersion >= descpb.InterleavedFormatVersion { - return false - } - maybeUpgradeToFamilyFormatVersion(desc) - desc.FormatVersion = descpb.InterleavedFormatVersion - return true -} - -func maybeUpgradeToFamilyFormatVersion(desc *descpb.TableDescriptor) bool { - if desc.FormatVersion >= descpb.FamilyFormatVersion { - return false - } - - var primaryIndexColumnIDs catalog.TableColSet - for _, colID := range desc.PrimaryIndex.ColumnIDs { - primaryIndexColumnIDs.Add(colID) - } - - desc.Families = []descpb.ColumnFamilyDescriptor{ - {ID: 0, Name: "primary"}, - } - desc.NextFamilyID = desc.Families[0].ID + 1 - addFamilyForCol := func(col *descpb.ColumnDescriptor) { - if primaryIndexColumnIDs.Contains(col.ID) { - desc.Families[0].ColumnNames = append(desc.Families[0].ColumnNames, col.Name) - desc.Families[0].ColumnIDs = append(desc.Families[0].ColumnIDs, col.ID) - return - } - colNames := []string{col.Name} - family := descpb.ColumnFamilyDescriptor{ - ID: descpb.FamilyID(col.ID), - Name: generatedFamilyName(descpb.FamilyID(col.ID), colNames), - ColumnNames: colNames, - ColumnIDs: []descpb.ColumnID{col.ID}, - DefaultColumnID: col.ID, - } - desc.Families = append(desc.Families, family) - if family.ID >= desc.NextFamilyID { - desc.NextFamilyID = family.ID + 1 - } - } - - for i := range desc.Columns { - addFamilyForCol(&desc.Columns[i]) - } - for i := range desc.Mutations { - m := &desc.Mutations[i] - if c := m.GetColumn(); c != nil { - addFamilyForCol(c) - } - } - - desc.FormatVersion = descpb.FamilyFormatVersion - - return true -} - // ForEachExprStringInTableDesc runs a closure for each expression string // within a TableDescriptor. The closure takes in a string pointer so that // it can mutate the TableDescriptor if desired. 
@@ -2365,7 +2004,7 @@ func (desc *wrapper) MakeFirstMutationPublic( includeConstraints bool, ) (catalog.TableDescriptor, error) { // Clone the ImmutableTable descriptor because we want to create an ImmutableCopy one. - table := NewExistingMutable(*protoutil.Clone(desc.TableDesc()).(*descpb.TableDescriptor)) + table := NewBuilder(desc.TableDesc()).BuildExistingMutableTable() mutationID := desc.Mutations[0].MutationID i := 0 for _, mutation := range desc.Mutations { diff --git a/pkg/sql/catalog/tabledesc/structured_test.go b/pkg/sql/catalog/tabledesc/structured_test.go index dd28704d4b12..b05fb47652b7 100644 --- a/pkg/sql/catalog/tabledesc/structured_test.go +++ b/pkg/sql/catalog/tabledesc/structured_test.go @@ -51,7 +51,7 @@ func TestAllocateIDs(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() - desc := NewCreatedMutable(descpb.TableDescriptor{ + desc := NewBuilder(&descpb.TableDescriptor{ ParentID: keys.MinUserDescID, ID: keys.MinUserDescID + 1, Name: "foo", @@ -72,12 +72,12 @@ func TestAllocateIDs(t *testing.T) { }, Privileges: descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()), FormatVersion: descpb.FamilyFormatVersion, - }) + }).BuildCreatedMutableTable() if err := desc.AllocateIDs(ctx); err != nil { t.Fatal(err) } - expected := NewCreatedMutable(descpb.TableDescriptor{ + expected := NewBuilder(&descpb.TableDescriptor{ ParentID: keys.MinUserDescID, ID: keys.MinUserDescID + 1, Version: 1, @@ -117,7 +117,7 @@ func TestAllocateIDs(t *testing.T) { NextIndexID: 5, NextMutationID: 1, FormatVersion: descpb.FamilyFormatVersion, - }) + }).BuildCreatedMutableTable() if !reflect.DeepEqual(expected, desc) { a, _ := json.MarshalIndent(expected, "", " ") b, _ := json.MarshalIndent(desc, "", " ") @@ -184,7 +184,7 @@ func TestFitColumnToFamily(t *testing.T) { } desc.Families = append(desc.Families, family) } - return NewCreatedMutable(desc) + return NewBuilder(&desc).BuildCreatedMutableTable() } emptyFamily := []*types.T{} @@ -283,7 +283,9 @@ func TestMaybeUpgradeFormatVersion(t *testing.T) { }, } for i, test := range tests { - desc, err := NewFilledInImmutable(context.Background(), nil, &test.desc) + b := NewBuilder(&test.desc) + err := b.RunPostDeserializationChanges(context.Background(), nil) + desc := b.BuildImmutableTable() require.NoError(t, err) changes, err := GetPostDeserializationChanges(desc) require.NoError(t, err) @@ -300,7 +302,7 @@ func TestMaybeUpgradeFormatVersion(t *testing.T) { func TestUnvalidateConstraints(t *testing.T) { ctx := context.Background() - desc := NewCreatedMutable(descpb.TableDescriptor{ + desc := NewBuilder(&descpb.TableDescriptor{ Name: "test", ParentID: descpb.ID(1), Columns: []descpb.ColumnDescriptor{ @@ -317,7 +319,7 @@ func TestUnvalidateConstraints(t *testing.T) { Validity: descpb.ConstraintValidity_Validated, }, }, - }) + }).BuildCreatedMutableTable() if err := desc.AllocateIDs(ctx); err != nil { t.Fatal(err) } @@ -480,7 +482,7 @@ func TestDefaultExprNil(t *testing.T) { } // Test and verify that the default expressions of the column descriptors // are all nil. 
- // nolint:descriptormarshal + //nolint:descriptormarshal for _, col := range desc.GetTable().Columns { if col.DefaultExpr != nil { t.Errorf("expected Column Default Expression to be 'nil', got %s instead.", *col.DefaultExpr) diff --git a/pkg/sql/catalog/tabledesc/table_desc.go b/pkg/sql/catalog/tabledesc/table_desc.go index e3324f1064f4..b17f36a65d00 100644 --- a/pkg/sql/catalog/tabledesc/table_desc.go +++ b/pkg/sql/catalog/tabledesc/table_desc.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" ) @@ -107,11 +106,10 @@ func (desc *wrapper) IsTemporary() bool { // ImmutableCopy implements the MutableDescriptor interface. func (desc *Mutable) ImmutableCopy() catalog.Descriptor { - // TODO (lucy): Should the immutable descriptor constructors always make a - // copy, so we don't have to do it here? - imm := NewImmutable(*protoutil.Clone(desc.TableDesc()).(*descpb.TableDescriptor)) - imm.(*immutable).isUncommittedVersion = desc.IsUncommittedVersion() - return imm + if desc.IsUncommittedVersion() { + return NewBuilderForUncommittedVersion(desc.TableDesc()).BuildImmutable() + } + return NewBuilder(desc.TableDesc()).BuildImmutable() } // IsUncommittedVersion implements the Descriptor interface. diff --git a/pkg/sql/catalog/tabledesc/table_desc_builder.go b/pkg/sql/catalog/tabledesc/table_desc_builder.go new file mode 100644 index 000000000000..96d54d745b0b --- /dev/null +++ b/pkg/sql/catalog/tabledesc/table_desc_builder.go @@ -0,0 +1,436 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tabledesc + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" + "github.com/cockroachdb/errors" +) + +// TableDescriptorBuilder is an extension of catalog.DescriptorBuilder +// for table descriptors. +type TableDescriptorBuilder interface { + catalog.DescriptorBuilder + BuildImmutableTable() catalog.TableDescriptor + BuildExistingMutableTable() *Mutable + BuildCreatedMutableTable() *Mutable +} + +type tableDescriptorBuilder struct { + original *descpb.TableDescriptor + maybeModified *descpb.TableDescriptor + changes PostDeserializationTableDescriptorChanges + skipFKsWithNoMatchingTable bool + isUncommittedVersion bool +} + +var _ TableDescriptorBuilder = &tableDescriptorBuilder{} + +// NewBuilder creates a new catalog.DescriptorBuilder object for building +// table descriptors. +func NewBuilder(desc *descpb.TableDescriptor) TableDescriptorBuilder { + return newBuilder(desc) +} + +// NewBuilderForUncommittedVersion is like NewBuilder but ensures that the +// uncommitted version flag is set in the built descriptor. +// This should be used when constructing a new copy of an immutable from an +// existing descriptor which may have a new version. 
+func NewBuilderForUncommittedVersion(desc *descpb.TableDescriptor) TableDescriptorBuilder { + b := newBuilder(desc) + b.isUncommittedVersion = true + return b +} + +// NewBuilderForFKUpgrade should be used when attempting to upgrade the +// foreign key representation of a table descriptor. +// When skipFKsWithNoMatchingTable is set, the FK upgrade is allowed +// to proceed even in the case where a referenced table cannot be retrieved +// by the DescGetter. Such upgrades are then not fully complete. +func NewBuilderForFKUpgrade( + desc *descpb.TableDescriptor, skipFKsWithNoMatchingTable bool, +) TableDescriptorBuilder { + b := newBuilder(desc) + b.skipFKsWithNoMatchingTable = skipFKsWithNoMatchingTable + return b +} + +func newBuilder(desc *descpb.TableDescriptor) *tableDescriptorBuilder { + return &tableDescriptorBuilder{ + original: protoutil.Clone(desc).(*descpb.TableDescriptor), + } +} + +// DescriptorType implements the catalog.DescriptorBuilder interface. +func (tdb *tableDescriptorBuilder) DescriptorType() catalog.DescriptorType { + return catalog.Table +} + +// RunPostDeserializationChanges implements the catalog.DescriptorBuilder +// interface. +func (tdb *tableDescriptorBuilder) RunPostDeserializationChanges( + ctx context.Context, dg catalog.DescGetter, +) (err error) { + tdb.maybeModified = protoutil.Clone(tdb.original).(*descpb.TableDescriptor) + tdb.changes, err = maybeFillInDescriptor(ctx, dg, tdb.maybeModified, tdb.skipFKsWithNoMatchingTable) + return err +} + +// BuildImmutable implements the catalog.DescriptorBuilder interface. +func (tdb *tableDescriptorBuilder) BuildImmutable() catalog.Descriptor { + return tdb.BuildImmutableTable() +} + +// BuildImmutableTable returns an immutable table descriptor. +func (tdb *tableDescriptorBuilder) BuildImmutableTable() catalog.TableDescriptor { + desc := tdb.maybeModified + if desc == nil { + desc = tdb.original + } + imm := makeImmutable(desc) + imm.postDeserializationChanges = tdb.changes + imm.isUncommittedVersion = tdb.isUncommittedVersion + return imm +} + +// BuildExistingMutable implements the catalog.DescriptorBuilder interface. +func (tdb *tableDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor { + return tdb.BuildExistingMutableTable() +} + +// BuildExistingMutableTable returns a mutable descriptor for a table +// which already exists. +func (tdb *tableDescriptorBuilder) BuildExistingMutableTable() *Mutable { + if tdb.maybeModified == nil { + tdb.maybeModified = protoutil.Clone(tdb.original).(*descpb.TableDescriptor) + } + return &Mutable{ + wrapper: wrapper{ + TableDescriptor: *tdb.maybeModified, + postDeserializationChanges: tdb.changes, + }, + ClusterVersion: *tdb.original, + } +} + +// BuildCreatedMutable implements the catalog.DescriptorBuilder interface. +func (tdb *tableDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor { + return tdb.BuildCreatedMutableTable() +} + +// BuildCreatedMutableTable returns a mutable descriptor for a table +// which is in the process of being created. +func (tdb *tableDescriptorBuilder) BuildCreatedMutableTable() *Mutable { + desc := tdb.maybeModified + if desc == nil { + desc = tdb.original + } + return &Mutable{ + wrapper: wrapper{ + TableDescriptor: *desc, + postDeserializationChanges: tdb.changes, + }, + } +} + +// makeImmutable returns an immutable from the given TableDescriptor. 
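// Editor's note: a minimal usage sketch, not part of this diff, showing how a
// call site might exercise the tabledesc builder introduced above in place of
// the deleted NewImmutable/NewExistingMutable/NewFilledInImmutable
// constructors. The package name, function name, and the nil DescGetter are
// illustrative assumptions only.
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
)

func buildTableDescriptors(
	ctx context.Context, proto *descpb.TableDescriptor,
) (catalog.TableDescriptor, *tabledesc.Mutable, error) {
	// Immutable view of a descriptor read from storage, with
	// post-deserialization upgrades (format version, privileges, FK
	// representation) applied first; this mirrors the old
	// NewFilledInImmutable flow.
	b := tabledesc.NewBuilder(proto)
	if err := b.RunPostDeserializationChanges(ctx, nil /* dg */); err != nil {
		return nil, nil, err
	}
	imm := b.BuildImmutableTable()

	// Mutable handle for an existing descriptor; the builder clones the
	// proto, so the caller's copy is never aliased.
	mut := tabledesc.NewBuilder(proto).BuildExistingMutableTable()
	return imm, mut, nil
}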
+func makeImmutable(tbl *descpb.TableDescriptor) *immutable { + desc := immutable{wrapper: wrapper{ + TableDescriptor: *tbl, + indexCache: newIndexCache(tbl), + columnCache: newColumnCache(tbl), + }} + + desc.allChecks = make([]descpb.TableDescriptor_CheckConstraint, len(tbl.Checks)) + for i, c := range tbl.Checks { + desc.allChecks[i] = *c + } + + return &desc +} + +// maybeFillInDescriptor performs any modifications needed to the table descriptor. +// This includes format upgrades and optional changes that can be handled by all version +// (for example: additional default privileges). +func maybeFillInDescriptor( + ctx context.Context, + dg catalog.DescGetter, + desc *descpb.TableDescriptor, + skipFKsWithNoMatchingTable bool, +) (changes PostDeserializationTableDescriptorChanges, err error) { + changes.UpgradedFormatVersion = maybeUpgradeFormatVersion(desc) + + // Fill in any incorrect privileges that may have been missed due to mixed-versions. + // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been + // run again and mixed-version clusters always write "good" descriptors. + changes.FixedPrivileges = descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) + + if dg != nil { + changes.UpgradedForeignKeyRepresentation, err = maybeUpgradeForeignKeyRepresentation( + ctx, dg, skipFKsWithNoMatchingTable /* skipFKsWithNoMatchingTable*/, desc) + } + if err != nil { + return PostDeserializationTableDescriptorChanges{}, err + } + return changes, nil +} + +// maybeUpgradeForeignKeyRepresentation destructively modifies the input table +// descriptor by replacing all old-style foreign key references (the ForeignKey +// and ReferencedBy fields on IndexDescriptor) with new-style foreign key +// references (the InboundFKs and OutboundFKs fields on TableDescriptor). It +// uses the supplied proto getter to look up the referenced descriptor on +// outgoing FKs and the origin descriptor on incoming FKs. It returns true in +// the first position if the descriptor was upgraded at all (i.e. had old-style +// references on it) and an error if the descriptor was unable to be upgraded +// for some reason. +// If skipFKsWithNoMatchingTable is set to true, if a *table* that's supposed to +// contain the matching forward/back-reference for an FK is not found, the FK +// is dropped from the table and no error is returned. +// +// TODO(lucy): Write tests for when skipFKsWithNoMatchingTable is true. +// TODO(ajwerner): This exists solely for the purpose of front-loading upgrade +// at backup and restore time and occurs in a hacky way. All of that upgrading +// should get reworked but we're leaving this here for now for simplicity. +func maybeUpgradeForeignKeyRepresentation( + ctx context.Context, + dg catalog.DescGetter, + skipFKsWithNoMatchingTable bool, + desc *descpb.TableDescriptor, +) (bool, error) { + if desc.Dropped() { + // If the table has been dropped, it's permitted to have corrupted foreign + // keys, so we have no chance to properly upgrade it. Just return as-is. + return false, nil + } + otherUnupgradedTables := make(map[descpb.ID]catalog.TableDescriptor) + changed := false + // No need to process mutations, since only descriptors written on a 19.2 + // cluster (after finalizing the upgrade) have foreign key mutations. 
+ for i := range desc.Indexes { + newChanged, err := maybeUpgradeForeignKeyRepOnIndex( + ctx, dg, otherUnupgradedTables, desc, &desc.Indexes[i], skipFKsWithNoMatchingTable, + ) + if err != nil { + return false, err + } + changed = changed || newChanged + } + newChanged, err := maybeUpgradeForeignKeyRepOnIndex( + ctx, dg, otherUnupgradedTables, desc, &desc.PrimaryIndex, skipFKsWithNoMatchingTable, + ) + if err != nil { + return false, err + } + changed = changed || newChanged + + return changed, nil +} + +// maybeUpgradeForeignKeyRepOnIndex is the meat of the previous function - it +// tries to upgrade a particular index's foreign key representation. +func maybeUpgradeForeignKeyRepOnIndex( + ctx context.Context, + dg catalog.DescGetter, + otherUnupgradedTables map[descpb.ID]catalog.TableDescriptor, + desc *descpb.TableDescriptor, + idx *descpb.IndexDescriptor, + skipFKsWithNoMatchingTable bool, +) (bool, error) { + var changed bool + if idx.ForeignKey.IsSet() { + ref := &idx.ForeignKey + if _, ok := otherUnupgradedTables[ref.Table]; !ok { + tbl, err := catalog.GetTableDescFromID(ctx, dg, ref.Table) + if err != nil { + if errors.Is(err, catalog.ErrDescriptorNotFound) && skipFKsWithNoMatchingTable { + // Ignore this FK and keep going. + } else { + return false, err + } + } else { + otherUnupgradedTables[ref.Table] = tbl + } + } + if tbl, ok := otherUnupgradedTables[ref.Table]; ok { + referencedIndex, err := tbl.FindIndexWithID(ref.Index) + if err != nil { + return false, err + } + numCols := ref.SharedPrefixLen + outFK := descpb.ForeignKeyConstraint{ + OriginTableID: desc.ID, + OriginColumnIDs: idx.ColumnIDs[:numCols], + ReferencedTableID: ref.Table, + ReferencedColumnIDs: referencedIndex.IndexDesc().ColumnIDs[:numCols], + Name: ref.Name, + Validity: ref.Validity, + OnDelete: ref.OnDelete, + OnUpdate: ref.OnUpdate, + Match: ref.Match, + } + desc.OutboundFKs = append(desc.OutboundFKs, outFK) + } + changed = true + idx.ForeignKey = descpb.ForeignKeyReference{} + } + + for refIdx := range idx.ReferencedBy { + ref := &(idx.ReferencedBy[refIdx]) + if _, ok := otherUnupgradedTables[ref.Table]; !ok { + tbl, err := catalog.GetTableDescFromID(ctx, dg, ref.Table) + if err != nil { + if errors.Is(err, catalog.ErrDescriptorNotFound) && skipFKsWithNoMatchingTable { + // Ignore this FK and keep going. + } else { + return false, err + } + } else { + otherUnupgradedTables[ref.Table] = tbl + } + } + + if otherTable, ok := otherUnupgradedTables[ref.Table]; ok { + originIndexI, err := otherTable.FindIndexWithID(ref.Index) + if err != nil { + return false, err + } + originIndex := originIndexI.IndexDesc() + // There are two cases. Either the other table is old (not upgraded yet), + // or it's new (already upgraded). + var inFK descpb.ForeignKeyConstraint + if !originIndex.ForeignKey.IsSet() { + // The other table has either no foreign key, indicating a corrupt + // reference, or the other table was upgraded. Assume the second for now. + // If we also find no matching reference in the new-style foreign keys, + // that indicates a corrupt reference. + var forwardFK *descpb.ForeignKeyConstraint + _ = otherTable.ForeachOutboundFK(func(otherFK *descpb.ForeignKeyConstraint) error { + if forwardFK != nil { + return nil + } + // To find a match, we find a foreign key reference that has the same + // referenced table ID, and that the index we point to is a valid + // index to satisfy the columns in the foreign key. 
+ if otherFK.ReferencedTableID == desc.ID && + descpb.ColumnIDs(originIndex.ColumnIDs).HasPrefix(otherFK.OriginColumnIDs) { + // Found a match. + forwardFK = otherFK + } + return nil + }) + if forwardFK == nil { + // Corrupted foreign key - there was no forward reference for the back + // reference. + return false, errors.AssertionFailedf( + "error finding foreign key on table %d for backref %+v", + otherTable.GetID(), ref) + } + inFK = descpb.ForeignKeyConstraint{ + OriginTableID: ref.Table, + OriginColumnIDs: forwardFK.OriginColumnIDs, + ReferencedTableID: desc.ID, + ReferencedColumnIDs: forwardFK.ReferencedColumnIDs, + Name: forwardFK.Name, + Validity: forwardFK.Validity, + OnDelete: forwardFK.OnDelete, + OnUpdate: forwardFK.OnUpdate, + Match: forwardFK.Match, + } + } else { + // We have an old (not upgraded yet) table, with a matching forward + // foreign key. + numCols := originIndex.ForeignKey.SharedPrefixLen + inFK = descpb.ForeignKeyConstraint{ + OriginTableID: ref.Table, + OriginColumnIDs: originIndex.ColumnIDs[:numCols], + ReferencedTableID: desc.ID, + ReferencedColumnIDs: idx.ColumnIDs[:numCols], + Name: originIndex.ForeignKey.Name, + Validity: originIndex.ForeignKey.Validity, + OnDelete: originIndex.ForeignKey.OnDelete, + OnUpdate: originIndex.ForeignKey.OnUpdate, + Match: originIndex.ForeignKey.Match, + } + } + desc.InboundFKs = append(desc.InboundFKs, inFK) + } + changed = true + } + idx.ReferencedBy = nil + return changed, nil +} + +// maybeUpgradeFormatVersion transforms the TableDescriptor to the latest +// FormatVersion (if it's not already there) and returns true if any changes +// were made. +// This method should be called through maybeFillInDescriptor, not directly. +func maybeUpgradeFormatVersion(desc *descpb.TableDescriptor) bool { + if desc.FormatVersion >= descpb.InterleavedFormatVersion { + return false + } + maybeUpgradeToFamilyFormatVersion(desc) + desc.FormatVersion = descpb.InterleavedFormatVersion + return true +} + +func maybeUpgradeToFamilyFormatVersion(desc *descpb.TableDescriptor) bool { + if desc.FormatVersion >= descpb.FamilyFormatVersion { + return false + } + + var primaryIndexColumnIDs catalog.TableColSet + for _, colID := range desc.PrimaryIndex.ColumnIDs { + primaryIndexColumnIDs.Add(colID) + } + + desc.Families = []descpb.ColumnFamilyDescriptor{ + {ID: 0, Name: "primary"}, + } + desc.NextFamilyID = desc.Families[0].ID + 1 + addFamilyForCol := func(col *descpb.ColumnDescriptor) { + if primaryIndexColumnIDs.Contains(col.ID) { + desc.Families[0].ColumnNames = append(desc.Families[0].ColumnNames, col.Name) + desc.Families[0].ColumnIDs = append(desc.Families[0].ColumnIDs, col.ID) + return + } + colNames := []string{col.Name} + family := descpb.ColumnFamilyDescriptor{ + ID: descpb.FamilyID(col.ID), + Name: generatedFamilyName(descpb.FamilyID(col.ID), colNames), + ColumnNames: colNames, + ColumnIDs: []descpb.ColumnID{col.ID}, + DefaultColumnID: col.ID, + } + desc.Families = append(desc.Families, family) + if family.ID >= desc.NextFamilyID { + desc.NextFamilyID = family.ID + 1 + } + } + + for i := range desc.Columns { + addFamilyForCol(&desc.Columns[i]) + } + for i := range desc.Mutations { + m := &desc.Mutations[i] + if c := m.GetColumn(); c != nil { + addFamilyForCol(c) + } + } + + desc.FormatVersion = descpb.FamilyFormatVersion + + return true +} diff --git a/pkg/sql/catalog/tabledesc/table_desc_test.go b/pkg/sql/catalog/tabledesc/table_desc_test.go index d6fee249b4af..a5814bc3cbea 100644 --- a/pkg/sql/catalog/tabledesc/table_desc_test.go +++ 
b/pkg/sql/catalog/tabledesc/table_desc_test.go @@ -21,27 +21,27 @@ func TestMaybeIncrementVersion(t *testing.T) { // Created descriptors should not have their version incremented. t.Run("created does not get incremented", func(t *testing.T) { { - mut := NewCreatedMutable(descpb.TableDescriptor{ + mut := NewBuilder(&descpb.TableDescriptor{ ID: 1, Version: 1, - }) + }).BuildCreatedMutableTable() mut.MaybeIncrementVersion() require.Equal(t, descpb.DescriptorVersion(1), mut.GetVersion()) } { - mut := NewCreatedMutable(descpb.TableDescriptor{ + mut := NewBuilder(&descpb.TableDescriptor{ ID: 1, Version: 42, - }) + }).BuildCreatedMutableTable() mut.MaybeIncrementVersion() require.Equal(t, descpb.DescriptorVersion(42), mut.GetVersion()) } }) t.Run("existed gets incremented once", func(t *testing.T) { - mut := NewExistingMutable(descpb.TableDescriptor{ + mut := NewBuilder(&descpb.TableDescriptor{ ID: 1, Version: 1, - }) + }).BuildExistingMutableTable() require.Equal(t, descpb.DescriptorVersion(1), mut.GetVersion()) mut.MaybeIncrementVersion() require.Equal(t, descpb.DescriptorVersion(2), mut.GetVersion()) diff --git a/pkg/sql/catalog/tabledesc/validate.go b/pkg/sql/catalog/tabledesc/validate.go index bf191a7159a4..2acad66366f7 100644 --- a/pkg/sql/catalog/tabledesc/validate.go +++ b/pkg/sql/catalog/tabledesc/validate.go @@ -569,11 +569,6 @@ func (desc *wrapper) ValidateSelf(vea catalog.ValidationErrorAccumulator) { } } - // Fill in any incorrect privileges that may have been missed due to mixed-versions. - // TODO(mberhault): remove this in 2.1 (maybe 2.2) when privilege-fixing migrations have been - // run again and mixed-version clusters always write "good" descriptors. - descpb.MaybeFixPrivileges(desc.ID, &desc.Privileges) - // Validate the privilege descriptor. 
vea.Report(desc.Privileges.Validate(desc.GetID(), privilege.Table)) diff --git a/pkg/sql/catalog/tabledesc/validate_test.go b/pkg/sql/catalog/tabledesc/validate_test.go index 3af2753d01cf..a7ee0c56d1d4 100644 --- a/pkg/sql/catalog/tabledesc/validate_test.go +++ b/pkg/sql/catalog/tabledesc/validate_test.go @@ -1050,8 +1050,8 @@ func TestValidateTableDesc(t *testing.T) { } for i, d := range testData { t.Run(d.err, func(t *testing.T) { - desc := NewImmutable(d.desc) - expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), d.err) + desc := NewBuilder(&d.desc).BuildImmutableTable() + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), d.err) if err := catalog.ValidateSelf(desc); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, expectedErr, d.desc) } else if expectedErr != err.Error() { @@ -1476,13 +1476,13 @@ func TestValidateCrossTableReferences(t *testing.T) { for i, test := range tests { descs := catalog.MapDescGetter{} - descs[1] = dbdesc.NewImmutable(descpb.DatabaseDescriptor{ID: 1}) + descs[1] = dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ID: 1}).BuildImmutable() for _, otherDesc := range test.otherDescs { otherDesc.Privileges = descpb.NewDefaultPrivilegeDescriptor(security.AdminRoleName()) - descs[otherDesc.ID] = NewImmutable(otherDesc) + descs[otherDesc.ID] = NewBuilder(&otherDesc).BuildImmutable() } - desc := NewImmutable(test.desc) - expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + desc := NewBuilder(&test.desc).BuildImmutable() + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) const validateCrossReferencesOnly = catalog.ValidationLevelSelfAndCrossReferences &^ (catalog.ValidationLevelSelfAndCrossReferences >> 1) if err := catalog.Validate(ctx, descs, validateCrossReferencesOnly, desc).CombinedError(); err == nil { if test.err != "" { @@ -1694,7 +1694,7 @@ func TestValidatePartitioning(t *testing.T) { } for i, test := range tests { t.Run(test.err, func(t *testing.T) { - desc := NewImmutable(test.desc) + desc := NewBuilder(&test.desc).BuildImmutableTable() err := ValidatePartitioning(desc) if !testutils.IsError(err, test.err) { t.Errorf(`%d: got "%v" expected "%v"`, i, err, test.err) diff --git a/pkg/sql/catalog/typedesc/BUILD.bazel b/pkg/sql/catalog/typedesc/BUILD.bazel index 7ddcb7c6843f..10b9127b8ef1 100644 --- a/pkg/sql/catalog/typedesc/BUILD.bazel +++ b/pkg/sql/catalog/typedesc/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "safe_format.go", "type_desc.go", + "type_desc_builder.go", ], importpath = "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc", visibility = ["//visibility:public"], diff --git a/pkg/sql/catalog/typedesc/safe_format_test.go b/pkg/sql/catalog/typedesc/safe_format_test.go index bbfcc0fba39c..6ba763534d76 100644 --- a/pkg/sql/catalog/typedesc/safe_format_test.go +++ b/pkg/sql/catalog/typedesc/safe_format_test.go @@ -28,7 +28,7 @@ func TestSafeMessage(t *testing.T) { exp string }{ { - desc: typedesc.NewImmutable(descpb.TypeDescriptor{ + desc: typedesc.NewBuilder(&descpb.TypeDescriptor{ Name: "foo", ID: 21, Version: 3, @@ -46,13 +46,13 @@ func TestSafeMessage(t *testing.T) { State: descpb.DescriptorState_PUBLIC, Kind: descpb.TypeDescriptor_ALIAS, ReferencingDescriptorIDs: []descpb.ID{73, 37}, - }), + }).BuildImmutableType(), exp: `typedesc.Immutable: {ID: 21, Version: 3, ModificationTime: "0,0", ` + `ParentID: 2, ParentSchemaID: 29, 
State: PUBLIC, NumDrainingNames: 1, ` + `Kind: ALIAS, ArrayTypeID: 117, ReferencingDescriptorIDs: [73, 37]}`, }, { - desc: typedesc.NewImmutable(descpb.TypeDescriptor{ + desc: typedesc.NewBuilder(&descpb.TypeDescriptor{ Name: "foo", ID: 21, Version: 3, @@ -73,7 +73,7 @@ func TestSafeMessage(t *testing.T) { EnumMembers: []descpb.TypeDescriptor_EnumMember{ {}, }, - }), + }).BuildImmutableType(), exp: `typedesc.Immutable: {ID: 21, Version: 3, ModificationTime: "0,0", ` + `ParentID: 2, ParentSchemaID: 29, State: PUBLIC, NumDrainingNames: 1, ` + `Kind: ENUM, NumEnumMembers: 1, ArrayTypeID: 117, ReferencingDescriptorIDs: [73, 37]}`, diff --git a/pkg/sql/catalog/typedesc/type_desc.go b/pkg/sql/catalog/typedesc/type_desc.go index eb5208edc554..0f21500fbd43 100644 --- a/pkg/sql/catalog/typedesc/type_desc.go +++ b/pkg/sql/catalog/typedesc/type_desc.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/hlc" - "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" ) @@ -41,7 +40,7 @@ var _ catalog.MutableDescriptor = (*Mutable)(nil) // type. It is intended to be used as an intermediate for name resolution, and // should not be serialized and stored on disk. func MakeSimpleAlias(typ *types.T, parentSchemaID descpb.ID) *Immutable { - return NewImmutable(descpb.TypeDescriptor{ + return NewBuilder(&descpb.TypeDescriptor{ // TODO(#sql-features): this should be attached to the current database. // We don't have a way of doing this yet (and virtual tables use some // fake magic). @@ -52,7 +51,7 @@ func MakeSimpleAlias(typ *types.T, parentSchemaID descpb.ID) *Immutable { ID: descpb.InvalidID, Kind: descpb.TypeDescriptor_ALIAS, Alias: typ, - }) + }).BuildImmutableType() } // NameResolutionResult implements the NameResolutionResult interface. @@ -94,69 +93,20 @@ type Immutable struct { isUncommittedVersion bool } -// NewCreatedMutable returns a Mutable from the given type descriptor with the -// cluster version being the zero type. This is for a type that is created in -// the same transaction. -func NewCreatedMutable(desc descpb.TypeDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(desc), - } -} - -// NewExistingMutable returns a Mutable from the given type descriptor with the -// cluster version also set to the descriptor. This is for types that already -// exist. -func NewExistingMutable(desc descpb.TypeDescriptor) *Mutable { - return &Mutable{ - Immutable: makeImmutable(*protoutil.Clone(&desc).(*descpb.TypeDescriptor)), - ClusterVersion: NewImmutable(desc), - } -} - // UpdateCachedFieldsOnModifiedMutable refreshes the Immutable field by // reconstructing it. This means that the fields used to fill enumMetadata // (readOnly, logicalReps, physicalReps) are reconstructed to reflect the // modified Mutable's state. This allows us to hydrate tables correctly even // when preceded by a type descriptor modification in the same transaction. 
func UpdateCachedFieldsOnModifiedMutable(desc catalog.TypeDescriptor) (*Mutable, error) { - imm := makeImmutable(*protoutil.Clone(desc.TypeDesc()).(*descpb.TypeDescriptor)) - imm.isUncommittedVersion = desc.IsUncommittedVersion() - mutable, ok := desc.(*Mutable) if !ok { return nil, errors.AssertionFailedf("type descriptor was not mutable") } - mutable.Immutable = imm + mutable.Immutable = *mutable.ImmutableCopy().(*Immutable) return mutable, nil } -// NewImmutable returns an Immutable from the given TypeDescriptor. -func NewImmutable(desc descpb.TypeDescriptor) *Immutable { - m := makeImmutable(desc) - return &m -} - -func makeImmutable(desc descpb.TypeDescriptor) Immutable { - immutDesc := Immutable{TypeDescriptor: desc} - - // Initialize metadata specific to the TypeDescriptor kind. - switch immutDesc.Kind { - case descpb.TypeDescriptor_ENUM, descpb.TypeDescriptor_MULTIREGION_ENUM: - immutDesc.logicalReps = make([]string, len(desc.EnumMembers)) - immutDesc.physicalReps = make([][]byte, len(desc.EnumMembers)) - immutDesc.readOnlyMembers = make([]bool, len(desc.EnumMembers)) - for i := range desc.EnumMembers { - member := &desc.EnumMembers[i] - immutDesc.logicalReps[i] = member.LogicalRepresentation - immutDesc.physicalReps[i] = member.PhysicalRepresentation - immutDesc.readOnlyMembers[i] = - member.Capability == descpb.TypeDescriptor_EnumMember_READ_ONLY - } - } - - return immutDesc -} - // TypeIDToOID converts a type descriptor ID into a type OID. func TypeIDToOID(id descpb.ID) oid.Oid { return oid.Oid(id) + oidext.CockroachPredefinedOIDMax @@ -253,9 +203,9 @@ func (desc *Immutable) GetAuditMode() descpb.TableDescriptor_AuditMode { return descpb.TableDescriptor_DISABLED } -// TypeName implements the DescriptorProto interface. -func (desc *Immutable) TypeName() string { - return "type" +// DescriptorType implements the catalog.Descriptor interface. +func (desc *Immutable) DescriptorType() catalog.DescriptorType { + return catalog.Type } // MaybeIncrementVersion implements the MutableDescriptor interface. @@ -294,9 +244,7 @@ func (desc *Mutable) OriginalVersion() descpb.DescriptorVersion { // ImmutableCopy implements the MutableDescriptor interface. func (desc *Mutable) ImmutableCopy() catalog.Descriptor { - // TODO (lucy): Should the immutable descriptor constructors always make a - // copy, so we don't have to do it here? - imm := NewImmutable(*protoutil.Clone(desc.TypeDesc()).(*descpb.TypeDescriptor)) + imm := NewBuilder(desc.TypeDesc()).BuildImmutableType() imm.isUncommittedVersion = desc.IsUncommittedVersion() return imm } diff --git a/pkg/sql/catalog/typedesc/type_desc_builder.go b/pkg/sql/catalog/typedesc/type_desc_builder.go new file mode 100644 index 000000000000..fc7179d138e3 --- /dev/null +++ b/pkg/sql/catalog/typedesc/type_desc_builder.go @@ -0,0 +1,110 @@ +// Copyright 2021 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package typedesc + +import ( + "context" + + "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" +) + +// TypeDescriptorBuilder is an extension of catalog.DescriptorBuilder +// for type descriptors. 
+type TypeDescriptorBuilder interface { + catalog.DescriptorBuilder + BuildImmutableType() *Immutable + BuildExistingMutableType() *Mutable + BuildCreatedMutableType() *Mutable +} + +type typeDescriptorBuilder struct { + original *descpb.TypeDescriptor +} + +var _ TypeDescriptorBuilder = &typeDescriptorBuilder{} + +// NewBuilder creates a new catalog.DescriptorBuilder object for building +// type descriptors. +func NewBuilder(desc *descpb.TypeDescriptor) TypeDescriptorBuilder { + return &typeDescriptorBuilder{ + original: protoutil.Clone(desc).(*descpb.TypeDescriptor), + } +} + +// DescriptorType implements the catalog.DescriptorBuilder interface. +func (tdb *typeDescriptorBuilder) DescriptorType() catalog.DescriptorType { + return catalog.Type +} + +// RunPostDeserializationChanges implements the catalog.DescriptorBuilder +// interface. +func (tdb *typeDescriptorBuilder) RunPostDeserializationChanges( + _ context.Context, _ catalog.DescGetter, +) error { + return nil +} + +// BuildImmutable implements the catalog.DescriptorBuilder interface. +func (tdb *typeDescriptorBuilder) BuildImmutable() catalog.Descriptor { + return tdb.BuildImmutableType() +} + +// BuildImmutableType returns an immutable type descriptor. +func (tdb *typeDescriptorBuilder) BuildImmutableType() *Immutable { + imm := makeImmutable(tdb.original) + return &imm +} + +// BuildExistingMutable implements the catalog.DescriptorBuilder interface. +func (tdb *typeDescriptorBuilder) BuildExistingMutable() catalog.MutableDescriptor { + return tdb.BuildExistingMutableType() +} + +// BuildExistingMutableType returns a mutable descriptor for a type +// which already exists. +func (tdb *typeDescriptorBuilder) BuildExistingMutableType() *Mutable { + clusterVersion := makeImmutable(protoutil.Clone(tdb.original).(*descpb.TypeDescriptor)) + return &Mutable{Immutable: makeImmutable(tdb.original), ClusterVersion: &clusterVersion} +} + +// BuildCreatedMutable implements the catalog.DescriptorBuilder interface. +func (tdb *typeDescriptorBuilder) BuildCreatedMutable() catalog.MutableDescriptor { + return tdb.BuildCreatedMutableType() +} + +// BuildCreatedMutableType returns a mutable descriptor for a type +// which is in the process of being created. +func (tdb *typeDescriptorBuilder) BuildCreatedMutableType() *Mutable { + return &Mutable{Immutable: makeImmutable(tdb.original)} +} + +func makeImmutable(desc *descpb.TypeDescriptor) Immutable { + immutDesc := Immutable{TypeDescriptor: *desc} + + // Initialize metadata specific to the TypeDescriptor kind. 
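// Editor's note: a minimal sketch, not part of this diff, of the analogous
// typedesc builder usage. The package and function names are illustrative
// assumptions; the builder and Build* methods are those defined above.
package example

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc"
)

func buildTypeDescriptors(proto *descpb.TypeDescriptor) (*typedesc.Immutable, *typedesc.Mutable) {
	// Immutable view; enum metadata (logical/physical representations and
	// read-only members) is filled in by the typedesc makeImmutable helper.
	imm := typedesc.NewBuilder(proto).BuildImmutableType()

	// Mutable handle for an existing type; ClusterVersion is built from a
	// separate clone of the original proto.
	mut := typedesc.NewBuilder(proto).BuildExistingMutableType()
	return imm, mut
}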
+ switch immutDesc.Kind { + case descpb.TypeDescriptor_ENUM, descpb.TypeDescriptor_MULTIREGION_ENUM: + immutDesc.logicalReps = make([]string, len(desc.EnumMembers)) + immutDesc.physicalReps = make([][]byte, len(desc.EnumMembers)) + immutDesc.readOnlyMembers = make([]bool, len(desc.EnumMembers)) + for i := range desc.EnumMembers { + member := &desc.EnumMembers[i] + immutDesc.logicalReps[i] = member.LogicalRepresentation + immutDesc.physicalReps[i] = member.PhysicalRepresentation + immutDesc.readOnlyMembers[i] = + member.Capability == descpb.TypeDescriptor_EnumMember_READ_ONLY + } + } + + return immutDesc +} diff --git a/pkg/sql/catalog/typedesc/type_desc_test.go b/pkg/sql/catalog/typedesc/type_desc_test.go index 1b4f99147338..26ecae8cbb32 100644 --- a/pkg/sql/catalog/typedesc/type_desc_test.go +++ b/pkg/sql/catalog/typedesc/type_desc_test.go @@ -331,8 +331,8 @@ func TestTypeDescIsCompatibleWith(t *testing.T) { } for i, test := range tests { - a := typedesc.NewImmutable(test.a) - b := typedesc.NewImmutable(test.b) + a := typedesc.NewBuilder(&test.a).BuildImmutableType() + b := typedesc.NewBuilder(&test.b).BuildImmutableType() err := a.IsCompatibleWith(b) if test.err == "" { require.NoError(t, err) @@ -349,20 +349,20 @@ func TestValidateTypeDesc(t *testing.T) { ctx := context.Background() descs := catalog.MapDescGetter{} - descs[100] = dbdesc.NewImmutable(descpb.DatabaseDescriptor{ + descs[100] = dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ Name: "db", ID: 100, - }) - descs[101] = schemadesc.NewImmutable(descpb.SchemaDescriptor{ + }).BuildImmutable() + descs[101] = schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ID: 101, ParentID: 100, Name: "schema", - }) - descs[102] = typedesc.NewImmutable(descpb.TypeDescriptor{ + }).BuildImmutable() + descs[102] = typedesc.NewBuilder(&descpb.TypeDescriptor{ ID: 102, Name: "type", - }) - descs[200] = dbdesc.NewImmutable(descpb.DatabaseDescriptor{ + }).BuildImmutable() + descs[200] = dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ Name: "multi-region-db", ID: 200, RegionConfig: &descpb.DatabaseDescriptor_RegionConfig{ @@ -371,7 +371,7 @@ func TestValidateTypeDesc(t *testing.T) { }, PrimaryRegion: "us-east-1", }, - }) + }).BuildImmutable() defaultPrivileges := descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()) invalidPrivileges := descpb.NewDefaultPrivilegeDescriptor(security.RootUserName()) @@ -683,7 +683,7 @@ func TestValidateTypeDesc(t *testing.T) { }, }, { - "user testuser must not have SELECT privileges on system type with ID=50", + "user testuser must not have SELECT privileges on type with ID=50", descpb.TypeDescriptor{ Name: "t", ID: typeDescID, @@ -803,8 +803,8 @@ func TestValidateTypeDesc(t *testing.T) { } for i, test := range testData { - desc := typedesc.NewImmutable(test.desc) - expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.TypeName(), desc.GetName(), desc.GetID(), test.err) + desc := typedesc.NewBuilder(&test.desc).BuildImmutable() + expectedErr := fmt.Sprintf("%s %q (%d): %s", desc.DescriptorType(), desc.GetName(), desc.GetID(), test.err) if err := catalog.ValidateSelfAndCrossReferences(ctx, descs, desc); err == nil { t.Errorf("#%d expected err: %s but found nil: %v", i, expectedErr, test.desc) } else if expectedErr != err.Error() { diff --git a/pkg/sql/catalog/validate.go b/pkg/sql/catalog/validate.go index 06580780c1cf..375df9740912 100644 --- a/pkg/sql/catalog/validate.go +++ b/pkg/sql/catalog/validate.go @@ -35,6 +35,9 @@ func Validate( ) ValidationErrors { // Check internal descriptor consistency. 
var vea validationErrorAccumulator + if level == NoValidation { + return &vea + } for _, desc := range descriptors { if level&ValidationLevelSelfOnly == 0 { continue @@ -85,8 +88,10 @@ func Validate( type ValidationLevel uint32 const ( + // NoValidation means don't perform any validation checks at all. + NoValidation ValidationLevel = 0 // ValidationLevelSelfOnly means only validate internal descriptor consistency. - ValidationLevelSelfOnly ValidationLevel = 1<<(iota+1) - 1 + ValidationLevelSelfOnly = 1<<(iota+1) - 1 // ValidationLevelSelfAndCrossReferences means do the above and also check // cross-references. ValidationLevelSelfAndCrossReferences @@ -180,7 +185,7 @@ func (vea *validationErrorAccumulator) Report(err error) { } func (vea *validationErrorAccumulator) setPrefix(desc Descriptor) { - vea.wrapPrefix = fmt.Sprintf("%s %q (%d)", desc.TypeName(), desc.GetName(), desc.GetID()) + vea.wrapPrefix = fmt.Sprintf("%s %q (%d)", desc.DescriptorType(), desc.GetName(), desc.GetID()) } // ValidationDescGetter is used by the validation methods on Descriptor. diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 23d55353cb42..8325fe63b8ce 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -238,12 +238,10 @@ func validateForeignKey( txn *kv.Txn, codec keys.SQLCodec, ) error { - desc, err := catalogkv.GetDescriptorByID(ctx, txn, codec, fk.ReferencedTableID, catalogkv.Immutable, - catalogkv.TableDescriptorKind, true /* required */) + targetTable, err := catalogkv.MustGetTableDescByID(ctx, txn, codec, fk.ReferencedTableID) if err != nil { return err } - targetTable := desc.(catalog.TableDescriptor) nCols := len(fk.OriginColumnIDs) referencedColumnNames, err := targetTable.NamesForColumnIDs(fk.ReferencedColumnIDs) diff --git a/pkg/sql/colfetcher/colbatch_scan.go b/pkg/sql/colfetcher/colbatch_scan.go index cae59582b83e..83e93c10db13 100644 --- a/pkg/sql/colfetcher/colbatch_scan.go +++ b/pkg/sql/colfetcher/colbatch_scan.go @@ -200,7 +200,7 @@ func NewColBatchScan( // indicates that we're probably doing this wrong. Instead we should be // just setting the ID and Version in the spec or something like that and // retrieving the hydrated immutable from cache. - table := tabledesc.NewImmutable(spec.Table) + table := tabledesc.NewBuilder(&spec.Table).BuildImmutableTable() virtualColumn := tabledesc.FindVirtualColumn(table, spec.VirtualColumn) cols := table.PublicColumns() if spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic { diff --git a/pkg/sql/create_schema.go b/pkg/sql/create_schema.go index 97ab87b2c483..3deec3ffa96b 100644 --- a/pkg/sql/create_schema.go +++ b/pkg/sql/create_schema.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -69,7 +70,7 @@ func CreateUserDefinedSchemaDescriptor( // and can't be in a dropping state. 
if schemaID != descpb.InvalidID { // Check if the object already exists in a dropped state - desc, err := catalogkv.GetAnyDescriptorByID(ctx, txn, execCfg.Codec, schemaID, catalogkv.Immutable) + desc, err := catalogkv.MustGetSchemaDescByID(ctx, txn, execCfg.Codec, schemaID) if err != nil { return nil, nil, err } @@ -81,7 +82,7 @@ func CreateUserDefinedSchemaDescriptor( } return nil, nil, nil } - return nil, nil, pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", schemaName) + return nil, nil, sqlerrors.NewSchemaAlreadyExistsError(schemaName) } // Check validity of the schema name. @@ -127,13 +128,13 @@ func CreateUserDefinedSchemaDescriptor( } // Create the SchemaDescriptor. - desc := schemadesc.NewCreatedMutable(descpb.SchemaDescriptor{ + desc := schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ParentID: db.ID, Name: schemaName, ID: id, Privileges: privs, Version: 1, - }) + }).BuildCreatedMutableSchema() return desc, privs, nil } diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 817cd538bfad..716fb95dee65 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -201,15 +201,18 @@ func getTableCreateParams( return nil, 0, err } - exists, id, err := catalogkv.LookupObjectID( - params.ctx, params.p.txn, params.ExecCfg().Codec, dbID, schemaID, tableName.Table()) - if err == nil && exists { - // Try and see what kind of object we collided with. - desc, err := catalogkv.GetAnyDescriptorByID(params.ctx, params.p.txn, params.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return nil, 0, sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - + desc, err := catalogkv.GetDescriptorCollidingWithObject( + params.ctx, + params.p.txn, + params.ExecCfg().Codec, + dbID, + schemaID, + tableName.Table(), + ) + if err != nil { + return nil, descpb.InvalidID, err + } + if desc != nil { // Ensure that the descriptor that does exist has the appropriate type. { mismatchedType := true @@ -229,7 +232,7 @@ func getTableCreateParams( // Only complain about mismatched types for // if not exists clauses. if mismatchedType && ifNotExists { - return nil, 0, pgerror.Newf(pgcode.WrongObjectType, + return nil, descpb.InvalidID, pgerror.Newf(pgcode.WrongObjectType, "%q is not a %s", tableName.Table(), kind) @@ -238,16 +241,14 @@ func getTableCreateParams( // Check if the object already exists in a dropped state if desc.Dropped() { - return nil, 0, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, + return nil, descpb.InvalidID, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "%s %q is being dropped, try again later", kind, tableName.Table()) } // Still return data in this case. - return tKey, schemaID, sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), tableName.Table()) - } else if err != nil { - return nil, 0, err + return tKey, schemaID, sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), tableName.FQString()) } return tKey, schemaID, nil @@ -617,12 +618,10 @@ func (p *planner) MaybeUpgradeDependentOldForeignKeyVersionTables( maybeUpgradeFKRepresentation := func(id descpb.ID) error { // Read the referenced table and see if the foreign key representation has changed. If it has, write // the upgraded descriptor back to disk. 
- desc, err := catalogkv.GetDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) + tbl, err := catalogkv.MustGetMutableTableDescByID(ctx, p.txn, p.ExecCfg().Codec, id) if err != nil { return err } - tbl := desc.(*tabledesc.Mutable) changes := tbl.GetPostDeserializationChanges() if changes.UpgradedForeignKeyRepresentation { err := p.writeSchemaChange(ctx, tbl, descpb.InvalidMutationID, diff --git a/pkg/sql/create_type.go b/pkg/sql/create_type.go index 52081543c785..4339f726af1f 100644 --- a/pkg/sql/create_type.go +++ b/pkg/sql/create_type.go @@ -157,20 +157,19 @@ func getCreateTypeParams( sqltelemetry.IncrementUserDefinedSchemaCounter(sqltelemetry.UserDefinedSchemaUsedByObject) } - typeKey = catalogkv.MakeObjectNameKey(params.ctx, params.ExecCfg().Settings, db.GetID(), schemaID, name.Type()) - exists, collided, err := catalogkv.LookupObjectID( - params.ctx, params.p.txn, params.ExecCfg().Codec, db.GetID(), schemaID, name.Type()) - if err == nil && exists { - // Try and see what kind of object we collided with. - desc, err := catalogkv.GetAnyDescriptorByID(params.ctx, params.p.txn, params.ExecCfg().Codec, collided, catalogkv.Immutable) - if err != nil { - return nil, 0, sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - return nil, 0, sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), name.String()) - } + err = catalogkv.CheckObjectCollision( + params.ctx, + params.p.txn, + params.ExecCfg().Codec, + db.GetID(), + schemaID, + name, + ) if err != nil { - return nil, 0, err + return nil, descpb.InvalidID, err } + + typeKey = catalogkv.MakeObjectNameKey(params.ctx, params.ExecCfg().Settings, db.GetID(), schemaID, name.Type()) return typeKey, schemaID, nil } @@ -250,7 +249,7 @@ func (p *planner) createArrayType( // Construct the descriptor for the array type. // TODO(ajwerner): This is getting fixed up in a later commit to deal with // meta, just hold on. - arrayTypDesc := typedesc.NewCreatedMutable(descpb.TypeDescriptor{ + arrayTypDesc := typedesc.NewBuilder(&descpb.TypeDescriptor{ Name: arrayTypeName, ID: id, ParentID: db.GetID(), @@ -259,7 +258,7 @@ func (p *planner) createArrayType( Alias: types.MakeArray(elemTyp), Version: 1, Privileges: typDesc.Privileges, - }) + }).BuildCreatedMutable() jobStr := fmt.Sprintf("implicit array type creation for %s", typ) if err := p.createDescriptorWithID( @@ -363,18 +362,17 @@ func (p *planner) createEnumWithID( // a free list of descriptor ID's (#48438), we should allocate an ID from // there if id + oidext.CockroachPredefinedOIDMax overflows past the // maximum uint32 value. - typeDesc := typedesc.NewCreatedMutable( - descpb.TypeDescriptor{ - Name: typeName.Type(), - ID: id, - ParentID: dbDesc.GetID(), - ParentSchemaID: schemaID, - Kind: enumKind, - EnumMembers: members, - Version: 1, - Privileges: privs, - RegionConfig: regionConfig, - }) + typeDesc := typedesc.NewBuilder(&descpb.TypeDescriptor{ + Name: typeName.Type(), + ID: id, + ParentID: dbDesc.GetID(), + ParentSchemaID: schemaID, + Kind: enumKind, + EnumMembers: members, + Version: 1, + Privileges: privs, + RegionConfig: regionConfig, + }).BuildCreatedMutableType() // Create the implicit array type for this type before finishing the type. 
arrayTypeID, err := p.createArrayType(params, typeName, typeDesc, dbDesc, schemaID) diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index b6dfde35aa4c..7f88aa248624 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -95,7 +95,7 @@ func (n *createViewNode) startExec(params runParams) error { for id, updated := range n.planDeps { backRefMutable := params.p.Descriptors().GetUncommittedTableByID(id) if backRefMutable == nil { - backRefMutable = tabledesc.NewExistingMutable(*updated.desc.TableDesc()) + backRefMutable = tabledesc.NewBuilder(updated.desc.TableDesc()).BuildExistingMutableTable() } if !n.persistence.IsTemporary() && backRefMutable.Temporary { hasTempBackref = true diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 69fc75d2b756..3510ba9e6d18 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -77,8 +77,7 @@ func (p *planner) createDatabase( if exists, databaseID, err := catalogkv.LookupDatabaseID(ctx, p.txn, p.ExecCfg().Codec, dbName); err == nil && exists { if database.IfNotExists { // Check if the database is in a dropping state - desc, err := catalogkv.GetDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, databaseID, catalogkv.Immutable, - catalogkv.DatabaseDescriptorKind, true) + desc, err := catalogkv.MustGetDatabaseDescByID(ctx, p.txn, p.ExecCfg().Codec, databaseID) if err != nil { return nil, false, err } diff --git a/pkg/sql/distsql_plan_csv.go b/pkg/sql/distsql_plan_csv.go index db57d110b2d1..ca8ac09d59ee 100644 --- a/pkg/sql/distsql_plan_csv.go +++ b/pkg/sql/distsql_plan_csv.go @@ -158,7 +158,7 @@ func presplitTableBoundaries( expirationTime := cfg.DB.Clock().Now().Add(time.Hour.Nanoseconds(), 0) for _, tbl := range tables { // TODO(ajwerner): Consider passing in the wrapped descriptors. - tblDesc := tabledesc.NewImmutable(*tbl.Desc) + tblDesc := tabledesc.NewBuilder(tbl.Desc).BuildImmutableTable() for _, span := range tblDesc.AllIndexSpans(cfg.Codec) { if err := cfg.DB.AdminSplit(ctx, span.Key, expirationTime); err != nil { return err diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index 9c7070db0a2c..fa5da625e69c 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -249,7 +249,7 @@ func (dsp *DistSQLPlanner) createPlanForCreateStats( } } - tableDesc := tabledesc.NewImmutable(details.Table) + tableDesc := tabledesc.NewBuilder(&details.Table).BuildImmutableTable() return dsp.createStatsPlan(planCtx, tableDesc, reqStats, job) } diff --git a/pkg/sql/doctor/doctor.go b/pkg/sql/doctor/doctor.go index c24902a903fd..3f9dd138b85c 100644 --- a/pkg/sql/doctor/doctor.go +++ b/pkg/sql/doctor/doctor.go @@ -57,15 +57,17 @@ type namespaceReverseMap map[int64][]descpb.NameInfo // JobsTable represents data read from `system.jobs`. 
type JobsTable []jobs.JobMetadata -func newDescGetter(ctx context.Context, rows []DescriptorTableRow) (catalog.MapDescGetter, error) { +func newDescGetter(rows []DescriptorTableRow) (catalog.MapDescGetter, error) { pg := catalog.MapDescGetter{} for _, r := range rows { var d descpb.Descriptor if err := protoutil.Unmarshal(r.DescBytes, &d); err != nil { return nil, errors.Errorf("failed to unmarshal descriptor %d: %v", r.ID, err) } - descpb.MaybeSetDescriptorModificationTimeFromMVCCTimestamp(ctx, &d, r.ModTime) - pg[descpb.ID(r.ID)] = catalogkv.UnwrapDescriptorRaw(ctx, &d) + b := catalogkv.NewBuilderWithMVCCTimestamp(&d, r.ModTime) + if b != nil { + pg[descpb.ID(r.ID)] = b.BuildImmutable() + } } return pg, nil } @@ -119,7 +121,7 @@ func ExamineDescriptors( fmt.Fprintf( stdout, "Examining %d descriptors and %d namespace entries...\n", len(descTable), len(namespaceTable)) - descGetter, err := newDescGetter(ctx, descTable) + descGetter, err := newDescGetter(descTable) if err != nil { return false, err } @@ -263,7 +265,7 @@ func ExamineJobs( stdout io.Writer, ) (ok bool, err error) { fmt.Fprintf(stdout, "Examining %d running jobs...\n", len(jobsTable)) - descGetter, err := newDescGetter(ctx, descTable) + descGetter, err := newDescGetter(descTable) if err != nil { return false, err } @@ -307,7 +309,7 @@ func reportMsg(desc catalog.Descriptor, format string, args ...interface{}) stri msg := fmt.Sprintf(format, args...) // Add descriptor-identifying prefix if it isn't there already. // The prefix has the same format as the validation error wrapper. - msgPrefix := fmt.Sprintf("%s %q (%d): ", desc.TypeName(), desc.GetName(), desc.GetID()) + msgPrefix := fmt.Sprintf("%s %q (%d): ", desc.DescriptorType(), desc.GetName(), desc.GetID()) if msg[:len(msgPrefix)] == msgPrefix { msgPrefix = "" } diff --git a/pkg/sql/doctor/doctor_test.go b/pkg/sql/doctor/doctor_test.go index 8101482ec981..5dea17f4d7e7 100644 --- a/pkg/sql/doctor/doctor_test.go +++ b/pkg/sql/doctor/doctor_test.go @@ -61,25 +61,18 @@ var validTableDesc = &descpb.Descriptor{ } func toBytes(t *testing.T, desc *descpb.Descriptor) []byte { - if desc.GetDatabase() != nil { - if desc.GetDatabase().Privileges == nil { - descpb.MaybeFixPrivileges(desc.GetDatabase().GetID(), &desc.GetDatabase().Privileges) - } - } else if desc.GetSchema() != nil { - if desc.GetSchema().Privileges == nil { - descpb.MaybeFixPrivileges(desc.GetSchema().GetID(), &desc.GetSchema().Privileges) - } - } else if tbl := descpb.TableFromDescriptor(desc, hlc.Timestamp{}); tbl != nil { - if tbl.Privileges == nil { - descpb.MaybeFixPrivileges(tbl.GetID(), &tbl.Privileges) - } - if tbl.FormatVersion == 0 { - tbl.FormatVersion = descpb.InterleavedFormatVersion - } - } else if typ := descpb.TypeFromDescriptor(desc, hlc.Timestamp{}); typ != nil { - if typ.Privileges == nil { - descpb.MaybeFixPrivileges(typ.GetID(), &typ.Privileges) + table, database, typ, schema := descpb.FromDescriptor(desc) + if table != nil { + descpb.MaybeFixPrivileges(table.GetID(), &table.Privileges) + if table.FormatVersion == 0 { + table.FormatVersion = descpb.InterleavedFormatVersion } + } else if database != nil { + descpb.MaybeFixPrivileges(database.GetID(), &database.Privileges) + } else if typ != nil { + descpb.MaybeFixPrivileges(typ.GetID(), &typ.Privileges) + } else if schema != nil { + descpb.MaybeFixPrivileges(schema.GetID(), &schema.Privileges) } res, err := protoutil.Marshal(desc) require.NoError(t, err) @@ -91,12 +84,16 @@ func TestExamineDescriptors(t *testing.T) { defer log.Scope(t).Close(t) 
droppedValidTableDesc := protoutil.Clone(validTableDesc).(*descpb.Descriptor) - descpb.TableFromDescriptor(droppedValidTableDesc, hlc.Timestamp{WallTime: 1}). - State = descpb.DescriptorState_DROP + { + tbl, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(droppedValidTableDesc, hlc.Timestamp{WallTime: 1}) + tbl.State = descpb.DescriptorState_DROP + } inSchemaValidTableDesc := protoutil.Clone(validTableDesc).(*descpb.Descriptor) - descpb.TableFromDescriptor(inSchemaValidTableDesc, hlc.Timestamp{WallTime: 1}). - UnexposedParentSchemaID = 3 + { + tbl, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(inSchemaValidTableDesc, hlc.Timestamp{WallTime: 1}) + tbl.UnexposedParentSchemaID = 3 + } tests := []struct { descTable doctor.DescriptorTable diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go index 66c8e586b566..810ba224d429 100644 --- a/pkg/sql/drop_table.go +++ b/pkg/sql/drop_table.go @@ -339,7 +339,7 @@ func (p *planner) dropTableImpl( dependedOnBy := append([]descpb.TableDescriptor_Reference(nil), tableDesc.DependedOnBy...) for _, ref := range dependedOnBy { viewDesc, err := p.getViewDescForCascade( - ctx, tableDesc.TypeName(), tableDesc.Name, tableDesc.ParentID, ref.ID, tree.DropCascade, + ctx, string(tableDesc.DescriptorType()), tableDesc.Name, tableDesc.ParentID, ref.ID, tree.DropCascade, ) if err != nil { return droppedViews, err diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 33a0b4a5bf57..25a0f353a9bb 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -885,14 +885,9 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { // Simulate a migration upgrading the table descriptor's format version after // the table has been dropped but before the truncation has occurred. - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, tableDesc.ID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) - if err != nil { - return err - } - tableDesc = desc.(*tabledesc.Mutable) - return nil + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + tableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, keys.SystemSQLCodec, tableDesc.ID) + return err }); err != nil { t.Fatal(err) } diff --git a/pkg/sql/drop_view.go b/pkg/sql/drop_view.go index 1642c663142a..3c1b605de4d3 100644 --- a/pkg/sql/drop_view.go +++ b/pkg/sql/drop_view.go @@ -138,7 +138,7 @@ func (p *planner) canRemoveDependentView( ref descpb.TableDescriptor_Reference, behavior tree.DropBehavior, ) error { - return p.canRemoveDependentViewGeneric(ctx, from.TypeName(), from.Name, from.ParentID, ref, behavior) + return p.canRemoveDependentViewGeneric(ctx, string(from.DescriptorType()), from.Name, from.ParentID, ref, behavior) } func (p *planner) canRemoveDependentViewGeneric( @@ -218,7 +218,7 @@ func (p *planner) dropViewImpl( dependedOnBy := append([]descpb.TableDescriptor_Reference(nil), viewDesc.DependedOnBy...) 
for _, ref := range dependedOnBy { dependentDesc, err := p.getViewDescForCascade( - ctx, viewDesc.TypeName(), viewDesc.Name, viewDesc.ParentID, ref.ID, behavior, + ctx, string(viewDesc.DescriptorType()), viewDesc.Name, viewDesc.ParentID, ref.ID, behavior, ) if err != nil { return cascadeDroppedViews, err diff --git a/pkg/sql/execinfrapb/flow_diagram.go b/pkg/sql/execinfrapb/flow_diagram.go index ff3ec8deeec4..517653b106c4 100644 --- a/pkg/sql/execinfrapb/flow_diagram.go +++ b/pkg/sql/execinfrapb/flow_diagram.go @@ -147,7 +147,7 @@ func (tr *TableReaderSpec) summary() (string, []string) { details := []string{indexDetail(&tr.Table, tr.IndexIdx)} if len(tr.Spans) > 0 { - tbl := tabledesc.NewImmutable(tr.Table) + tbl := tabledesc.NewBuilder(&tr.Table).BuildImmutableTable() // only show the first span idx := tbl.ActiveIndexes()[int(tr.IndexIdx)] valDirs := catalogkeys.IndexKeyValDirs(idx.IndexDesc()) diff --git a/pkg/sql/gcjob/gc_job.go b/pkg/sql/gcjob/gc_job.go index 9f85734d7729..c7394e7959d0 100644 --- a/pkg/sql/gcjob/gc_job.go +++ b/pkg/sql/gcjob/gc_job.go @@ -105,7 +105,7 @@ func (r schemaChangeGCResumer) Resume(ctx context.Context, execCtx interface{}) if err := sql.TruncateInterleavedIndexes( ctx, execCfg, - tabledesc.NewImmutable(*details.InterleavedTable), + tabledesc.NewBuilder(details.InterleavedTable).BuildImmutableTable(), details.InterleavedIndexes, ); err != nil { return err diff --git a/pkg/sql/gcjob/index_garbage_collection.go b/pkg/sql/gcjob/index_garbage_collection.go index e1b75b1616ce..270ee6c3452e 100644 --- a/pkg/sql/gcjob/index_garbage_collection.go +++ b/pkg/sql/gcjob/index_garbage_collection.go @@ -66,7 +66,11 @@ func gcIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. 
if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return sql.RemoveIndexZoneConfigs(ctx, txn, execCfg, parentTable.GetID(), []descpb.IndexDescriptor{indexDesc}) + freshParentTableDesc, err := catalogkv.MustGetTableDescByID(ctx, txn, execCfg.Codec, parentID) + if err != nil { + return err + } + return sql.RemoveIndexZoneConfigs(ctx, txn, execCfg, freshParentTableDesc, []descpb.IndexDescriptor{indexDesc}) }); err != nil { return errors.Wrapf(err, "removing index %d zone configs", indexDesc.ID) } diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index de4696ff18d5..280eab394aad 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -92,20 +92,13 @@ func TestSchemaChangeGCJob(t *testing.T) { var myTableDesc *tabledesc.Mutable var myOtherTableDesc *tabledesc.Mutable - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - myDesc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, myTableID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) - if err != nil { - return err - } - myTableDesc = myDesc.(*tabledesc.Mutable) - myOtherDesc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, myOtherTableID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + myTableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, keys.SystemSQLCodec, myTableID) if err != nil { return err } - myOtherTableDesc = myOtherDesc.(*tabledesc.Mutable) - return nil + myOtherTableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) + return err }); err != nil { t.Fatal(err) } @@ -226,9 +219,8 @@ func TestSchemaChangeGCJob(t *testing.T) { } } - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - myDesc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, myTableID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + myTableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, keys.SystemSQLCodec, myTableID) if ttlTime != FUTURE && (dropItem == TABLE || dropItem == DATABASE) { // We dropped the table, so expect it to not be found. require.EqualError(t, err, "descriptor not found") @@ -237,19 +229,13 @@ func TestSchemaChangeGCJob(t *testing.T) { if err != nil { return err } - myTableDesc = myDesc.(*tabledesc.Mutable) - myOtherDesc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, myOtherTableID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true /* required */) + myOtherTableDesc, err = catalogkv.MustGetMutableTableDescByID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) if ttlTime != FUTURE && dropItem == DATABASE { // We dropped the entire database, so expect none of the tables to be found. 
require.EqualError(t, err, "descriptor not found") return nil } - if err != nil { - return err - } - myOtherTableDesc = myOtherDesc.(*tabledesc.Mutable) - return nil + return err }); err != nil { t.Fatal(err) } diff --git a/pkg/sql/index_backfiller.go b/pkg/sql/index_backfiller.go index 860a8d3ffd19..27de5e9718b3 100644 --- a/pkg/sql/index_backfiller.go +++ b/pkg/sql/index_backfiller.go @@ -123,7 +123,7 @@ func (ib *IndexBackfillPlanner) plan( var p *PhysicalPlan var evalCtx extendedEvalContext var planCtx *PlanningCtx - td := tabledesc.NewExistingMutable(*tableDesc.TableDesc()) + td := tabledesc.NewBuilder(tableDesc.TableDesc()).BuildExistingMutableTable() if err := ib.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { evalCtx = createSchemaChangeEvalCtx(ctx, ib.execCfg, nowTimestamp, ib.ieFactory) planCtx = ib.execCfg.DistSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil /* planner */, txn, diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index abe967efa83b..b5a797b47ddd 100755 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -731,7 +731,7 @@ CREATE TABLE information_schema.constraint_column_usage ( // For foreign key constraint, constraint_column_usage // identifies the table/columns that the foreign key // references. - conTable = tabledesc.NewImmutable(*con.ReferencedTable) + conTable = tabledesc.NewBuilder(con.ReferencedTable).BuildImmutableTable() conCols, err = conTable.NamesForColumnIDs(con.FK.ReferencedColumnIDs) if err != nil { return err diff --git a/pkg/sql/logictest/testdata/logic_test/create_as b/pkg/sql/logictest/testdata/logic_test/create_as index dc66084cd412..506678ca544c 100644 --- a/pkg/sql/logictest/testdata/logic_test/create_as +++ b/pkg/sql/logictest/testdata/logic_test/create_as @@ -96,7 +96,7 @@ SELECT * FROM something LIMIT 1 statement ok CREATE TABLE foo (x, y, z) AS SELECT catalog_name, schema_name, sql_path FROM information_schema.schemata -statement error pq: relation "foo" already exists +statement error pq: relation "test.public.foo" already exists CREATE TABLE foo (x, y, z) AS SELECT catalog_name, schema_name, sql_path FROM information_schema.schemata statement error pq: value type tuple cannot be used for table columns diff --git a/pkg/sql/logictest/testdata/logic_test/drop_type b/pkg/sql/logictest/testdata/logic_test/drop_type index 62ebc979507b..8af58ac634fb 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_type +++ b/pkg/sql/logictest/testdata/logic_test/drop_type @@ -46,7 +46,7 @@ statement ok BEGIN; DROP TYPE t -statement error pq: type \"t\" already exists +statement error pq: type \"test.public.t\" already exists CREATE TYPE t AS ENUM () statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/enums b/pkg/sql/logictest/testdata/logic_test/enums index 2402a15f4166..502c89ac4f9a 100644 --- a/pkg/sql/logictest/testdata/logic_test/enums +++ b/pkg/sql/logictest/testdata/logic_test/enums @@ -6,16 +6,16 @@ CREATE TYPE t AS ENUM () statement error pq: relation "t" does not exist SELECT * FROM t -statement error pq: type "t" already exists +statement error pq: type "test.public.t" already exists CREATE TABLE t (x INT) -statement error pq: type "t" already exists +statement error pq: type "test.public.t" already exists CREATE TYPE t AS ENUM () statement ok CREATE TABLE torename (x INT) -statement error pq: type "t" already exists +statement error pq: type "test.public.t" already exists ALTER TABLE torename RENAME TO t statement ok @@ -1300,7 +1300,7 @@ subtest if_not_exists statement 
ok CREATE TYPE ifne AS ENUM ('hi') -statement error pq: type "ifne" already exists +statement error pq: type "test_57196.public.ifne" already exists CREATE TYPE ifne AS ENUM ('hi') statement ok @@ -1309,7 +1309,7 @@ CREATE TYPE IF NOT EXISTS ifne AS ENUM ('hi') statement ok CREATE TABLE table_ifne (x INT) -statement error pq: relation "table_ifne" already exists +statement error pq: relation "test_57196.public.table_ifne" already exists CREATE TYPE IF NOT EXISTS table_ifne AS ENUM ('hi') # Regression test for incorrectly serializing NULL expression type annotation in diff --git a/pkg/sql/logictest/testdata/logic_test/grant_database b/pkg/sql/logictest/testdata/logic_test/grant_database index 180c6c217601..cbdcd809c331 100644 --- a/pkg/sql/logictest/testdata/logic_test/grant_database +++ b/pkg/sql/logictest/testdata/logic_test/grant_database @@ -8,10 +8,10 @@ database_name grantee privilege_type a admin ALL a root ALL -statement error user root must have exactly ALL privileges on system database with ID=.* +statement error user root must have exactly ALL privileges on database with ID=.* REVOKE SELECT ON DATABASE a FROM root -statement error user admin must have exactly ALL privileges on system database with ID=.* +statement error user admin must have exactly ALL privileges on database with ID=.* REVOKE SELECT ON DATABASE a FROM admin statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/rename_table b/pkg/sql/logictest/testdata/logic_test/rename_table index e2e8bb4a2b67..23c57acf4083 100644 --- a/pkg/sql/logictest/testdata/logic_test/rename_table +++ b/pkg/sql/logictest/testdata/logic_test/rename_table @@ -68,7 +68,7 @@ CREATE TABLE t ( statement ok INSERT INTO t VALUES (4, 16), (5, 25) -statement error pgcode 42P07 relation "new_kv" already exists +statement error pgcode 42P07 relation "test.public.new_kv" already exists ALTER TABLE t RENAME TO new_kv user testuser diff --git a/pkg/sql/logictest/testdata/logic_test/rename_view b/pkg/sql/logictest/testdata/logic_test/rename_view index d215a3d259e1..77b945f27e12 100644 --- a/pkg/sql/logictest/testdata/logic_test/rename_view +++ b/pkg/sql/logictest/testdata/logic_test/rename_view @@ -81,7 +81,7 @@ INSERT INTO t VALUES (4, 16), (5, 25) statement ok CREATE VIEW v as SELECT c1,c2 from t -statement error pgcode 42P07 relation "new_v" already exists +statement error pgcode 42P07 relation "test.public.new_v" already exists ALTER VIEW v RENAME TO new_v user testuser diff --git a/pkg/sql/logictest/testdata/logic_test/save_table b/pkg/sql/logictest/testdata/logic_test/save_table index 522ec5834f43..9da597e1319c 100644 --- a/pkg/sql/logictest/testdata/logic_test/save_table +++ b/pkg/sql/logictest/testdata/logic_test/save_table @@ -27,7 +27,7 @@ SELECT * FROM t 4 four 5 five -query error create save table: relation "save_table_test_scan_1" already exists +query error create save table: relation "savetables.public.save_table_test_scan_1" already exists SELECT * FROM u statement ok diff --git a/pkg/sql/logictest/testdata/logic_test/schema b/pkg/sql/logictest/testdata/logic_test/schema index 5a9a82cc0b67..090cf2129b89 100644 --- a/pkg/sql/logictest/testdata/logic_test/schema +++ b/pkg/sql/logictest/testdata/logic_test/schema @@ -766,13 +766,13 @@ ALTER TABLE sch.table_to_rename RENAME TO renamed_table; statement ok ALTER TABLE sch.renamed_table RENAME TO sch.renamed_table_2; -statement error pq: relation "table_exists" already exists +statement error pq: relation "d54662.sch.table_exists" already exists ALTER TABLE sch.renamed_table_2 RENAME TO 
sch.table_exists; statement ok ALTER TABLE public_table_to_rename RENAME TO public.renamed_public_table; -statement error pq: relation "public_table_exists" already exists +statement error pq: relation "d54662.public.public_table_exists" already exists ALTER TABLE renamed_public_table RENAME TO public_table_exists; subtest show_tables diff --git a/pkg/sql/logictest/testdata/logic_test/sequences b/pkg/sql/logictest/testdata/logic_test/sequences index 4c69588d3257..0eb708968bcd 100644 --- a/pkg/sql/logictest/testdata/logic_test/sequences +++ b/pkg/sql/logictest/testdata/logic_test/sequences @@ -90,7 +90,7 @@ statement ok CREATE SEQUENCE foo # A sequence with the same name can't be created again. -statement error pgcode 42P07 relation "foo" already exists +statement error pgcode 42P07 relation "test.public.foo" already exists CREATE SEQUENCE foo statement ok @@ -100,7 +100,7 @@ statement error pgcode 42601 conflicting or redundant options CREATE SEQUENCE bar INCREMENT 5 MAXVALUE 1000 INCREMENT 2 # Sequences are in the same namespace as tables. -statement error pgcode 42P07 relation "foo" already exists +statement error pgcode 42P07 relation "test.public.foo" already exists CREATE TABLE foo (k BYTES PRIMARY KEY, v BYTES) # You can't create with 0 increment. diff --git a/pkg/sql/logictest/testdata/logic_test/set b/pkg/sql/logictest/testdata/logic_test/set index f6ea9f0f06cc..5b3e58394803 100644 --- a/pkg/sql/logictest/testdata/logic_test/set +++ b/pkg/sql/logictest/testdata/logic_test/set @@ -50,7 +50,7 @@ SHOW TABLES FROM foo public bar table root 0 NULL # SET statement succeeds, CREATE TABLE fails. -statement error pgcode 42P07 relation \"bar\" already exists +statement error pgcode 42P07 relation \"foo.public.bar\" already exists SET database = foo; CREATE TABLE bar (k INT PRIMARY KEY) query T colnames diff --git a/pkg/sql/logictest/testdata/logic_test/table b/pkg/sql/logictest/testdata/logic_test/table index 1bba390edcd3..3499277e9546 100644 --- a/pkg/sql/logictest/testdata/logic_test/table +++ b/pkg/sql/logictest/testdata/logic_test/table @@ -10,7 +10,7 @@ CREATE TABLE test."" (id INT PRIMARY KEY) statement ok CREATE TABLE test.a (id INT PRIMARY KEY) -statement error pgcode 42P07 relation "a" already exists +statement error pgcode 42P07 relation "test.public.a" already exists CREATE TABLE test.a (id INT PRIMARY KEY) statement ok @@ -19,7 +19,7 @@ SET DATABASE = test statement error invalid table name: "" CREATE TABLE "" (id INT PRIMARY KEY) -statement error pgcode 42P07 relation "a" already exists +statement error pgcode 42P07 relation "test.public.a" already exists CREATE TABLE a (id INT PRIMARY KEY) statement error duplicate column name: "id" diff --git a/pkg/sql/logictest/testdata/logic_test/views b/pkg/sql/logictest/testdata/logic_test/views index 942ba497c105..55518b0ad82d 100644 --- a/pkg/sql/logictest/testdata/logic_test/views +++ b/pkg/sql/logictest/testdata/logic_test/views @@ -13,10 +13,10 @@ INSERT INTO t VALUES (1, 99), (2, 98), (3, 97) statement ok CREATE VIEW v1 AS SELECT a, b FROM t -statement error pgcode 42P07 relation \"v1\" already exists +statement error pgcode 42P07 relation \"test.public.v1\" already exists CREATE VIEW v1 AS SELECT a, b FROM t -statement error pgcode 42P07 relation \"t\" already exists +statement error pgcode 42P07 relation \"test.public.t\" already exists CREATE VIEW t AS SELECT a, b FROM t # view statement ignored if other way around. 
@@ -429,7 +429,7 @@ DROP TABLE t statement ok CREATE VIEW foo AS SELECT catalog_name, schema_name, sql_path FROM information_schema.schemata -statement error pq: relation "foo" already exists +statement error pq: relation "test.public.foo" already exists CREATE VIEW foo AS SELECT catalog_name, schema_name, sql_path FROM information_schema.schemata # Ensure views work with dates/timestamps (#12420) diff --git a/pkg/sql/namespace_test.go b/pkg/sql/namespace_test.go index f2c0faf3c6a7..16d451f54264 100644 --- a/pkg/sql/namespace_test.go +++ b/pkg/sql/namespace_test.go @@ -154,18 +154,18 @@ func TestNamespaceTableSemantics(t *testing.T) { // Creating a table should fail now, because an entry was explicitly added to // the old system.namespace_deprecated table. _, err = sqlDB.Exec(`CREATE TABLE test.public.rel(a int)`) - if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("rel").Error()) { + if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("test.public.rel").Error()) { t.Fatalf("unexpected error %v", err) } // Same applies to a table which doesn't explicitly specify the public schema, // as that is the default. _, err = sqlDB.Exec(`CREATE TABLE test.rel(a int)`) - if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("rel").Error()) { + if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("test.public.rel").Error()) { t.Fatalf("unexpected error %v", err) } // Can not create a sequence with the same name either. _, err = sqlDB.Exec(`CREATE SEQUENCE test.rel`) - if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("rel").Error()) { + if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("test.public.rel").Error()) { t.Fatalf("unexpected error %v", err) } @@ -174,7 +174,7 @@ func TestNamespaceTableSemantics(t *testing.T) { t.Fatal(err) } _, err = sqlDB.Exec(`ALTER TABLE rel2 RENAME TO rel`) - if testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("rel").Error()) { + if testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("test.public.rel").Error()) { t.Fatalf("unexpected error %v", err) } @@ -183,7 +183,7 @@ func TestNamespaceTableSemantics(t *testing.T) { t.Fatal(err) } _, err = sqlDB.Exec(`ALTER SEQUENCE rel2 RENAME TO rel`) - if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("rel").Error()) { + if !testutils.IsError(err, sqlerrors.NewRelationAlreadyExistsError("defaultdb.public.rel").Error()) { t.Fatalf("unexpected error %v", err) } diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index e159eb00484d..1132aa0813e3 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -1211,15 +1211,7 @@ func (r oneAtATimeSchemaResolver) getTableByID(id descpb.ID) (catalog.TableDescr func (r oneAtATimeSchemaResolver) getSchemaByID(id descpb.ID) (*schemadesc.Immutable, error) { // TODO (rohany): This should use the descs.Collection. 
- desc, err := catalogkv.GetAnyDescriptorByID(r.ctx, r.p.txn, r.p.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return nil, err - } - sc, ok := desc.(*schemadesc.Immutable) - if !ok { - return nil, sqlerrors.NewUndefinedSchemaError(fmt.Sprintf("[%d]", id)) - } - return sc, nil + return catalogkv.MustGetSchemaDescByID(r.ctx, r.p.txn, r.p.ExecCfg().Codec, id) } // makeAllRelationsVirtualTableWithDescriptorIDIndex creates a virtual table that searches through diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index 32ab2cd54a5e..0ee9586276ea 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -1061,7 +1061,7 @@ func TestPGPreparedExec(t *testing.T) { "CREATE TABLE d.public.t (i INT, s STRING, d INT)", []preparedExecTest{ baseTest, - baseTest.Error(`pq: relation "t" already exists`), + baseTest.Error(`pq: relation "d.public.t" already exists`), }, }, { diff --git a/pkg/sql/privileged_accessor.go b/pkg/sql/privileged_accessor.go index f8873ba1a385..8ab1734c7bf0 100644 --- a/pkg/sql/privileged_accessor.go +++ b/pkg/sql/privileged_accessor.go @@ -100,7 +100,7 @@ func (p *planner) LookupZoneConfigByNamespaceID( // to check the permissions of a descriptor given its ID, or the id given // is not a descriptor of a table or database. func (p *planner) checkDescriptorPermissions(ctx context.Context, id descpb.ID) error { - desc, err := catalogkv.GetAnyDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id, catalogkv.Immutable) + desc, err := catalogkv.GetDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id) if err != nil { return err } diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index fc791572c27e..0f1ff0d7e6b6 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -85,7 +85,7 @@ func (p *planner) RenameTable(ctx context.Context, n *tree.RenameTable) (planNod for _, dependent := range tableDesc.DependedOnBy { if !dependent.ByID { return nil, p.dependentViewError( - ctx, tableDesc.TypeName(), oldTn.String(), + ctx, string(tableDesc.DescriptorType()), oldTn.String(), tableDesc.ParentID, dependent.ID, "rename", ) } @@ -196,18 +196,15 @@ func (n *renameTableNode) startExec(params runParams) error { return nil } - exists, id, err := catalogkv.LookupObjectID( - params.ctx, params.p.txn, p.ExecCfg().Codec, targetDbDesc.GetID(), targetSchemaDesc.ID, newTn.Table(), + err := catalogkv.CheckObjectCollision( + params.ctx, + params.p.txn, + p.ExecCfg().Codec, + targetDbDesc.GetID(), + targetSchemaDesc.ID, + newTn, ) - if err == nil && exists { - // Try and see what kind of object we collided with. - desc, err := catalogkv.GetAnyDescriptorByID( - params.ctx, params.p.txn, p.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - return sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), newTn.Table()) - } else if err != nil { + if err != nil { return err } @@ -237,17 +234,6 @@ func (n *renameTableNode) startExec(params runParams) error { return err } - if err == nil && exists { - // Try and see what kind of object we collided with. 
- desc, err := catalogkv.GetAnyDescriptorByID(params.ctx, params.p.txn, p.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - return sqlerrors.MakeObjectAlreadyExistsError(desc.DescriptorProto(), newTn.Table()) - } else if err != nil { - return err - } - newTbKey := catalogkv.MakeObjectNameKey(ctx, params.ExecCfg().Settings, targetDbDesc.GetID(), tableDesc.GetParentSchemaID(), newTn.Table()) diff --git a/pkg/sql/rename_test.go b/pkg/sql/rename_test.go index 642f9a3f91b9..8b942ed17090 100644 --- a/pkg/sql/rename_test.go +++ b/pkg/sql/rename_test.go @@ -193,7 +193,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); // name is not deleted from the database until the async schema changer checks // that there's no more leases on the old version). if _, err := db.Exec("CREATE TABLE test.t (a INT PRIMARY KEY)"); !testutils.IsError( - err, `relation "t" already exists`) { + err, `relation "test.public.t" already exists`) { t.Fatal(err) } diff --git a/pkg/sql/repair.go b/pkg/sql/repair.go index a12eab24071e..74d8a5078b72 100644 --- a/pkg/sql/repair.go +++ b/pkg/sql/repair.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" @@ -110,41 +111,38 @@ func (p *planner) UnsafeUpsertDescriptor( previousUserPrivileges = existing.GetPrivileges().Users } - var objectType privilege.ObjectType + tbl, db, typ, schema := descpb.FromDescriptor(&desc) switch md := existing.(type) { case *tabledesc.Mutable: - md.TableDescriptor = *desc.GetTable() // nolint:descriptormarshal - objectType = privilege.Table + md.TableDescriptor = *tbl case *schemadesc.Mutable: - md.SchemaDescriptor = *desc.GetSchema() - objectType = privilege.Schema + md.SchemaDescriptor = *schema case *dbdesc.Mutable: - md.DatabaseDescriptor = *desc.GetDatabase() - objectType = privilege.Database + md.DatabaseDescriptor = *db case *typedesc.Mutable: - md.TypeDescriptor = *desc.GetType() - objectType = privilege.Type + md.TypeDescriptor = *typ case nil: - // nolint:descriptormarshal - if tableDesc := desc.GetTable(); tableDesc != nil { - existing = tabledesc.NewCreatedMutable(*tableDesc) - objectType = privilege.Table - } else if schemaDesc := desc.GetSchema(); schemaDesc != nil { - existing = schemadesc.NewCreatedMutable(*schemaDesc) - objectType = privilege.Schema - } else if dbDesc := desc.GetDatabase(); dbDesc != nil { - existing = dbdesc.NewCreatedMutable(*dbDesc) - objectType = privilege.Database - } else if typeDesc := desc.GetType(); typeDesc != nil { - existing = typedesc.NewCreatedMutable(*typeDesc) - objectType = privilege.Type - } else { + b := catalogkv.NewBuilder(&desc) + if b == nil { return pgerror.New(pgcode.InvalidTableDefinition, "invalid ") } + existing = b.BuildCreatedMutable() default: return errors.AssertionFailedf("unknown descriptor type %T for id %d", existing, id) } + objectType := privilege.Any + switch existing.DescriptorType() { + case catalog.Database: + objectType = privilege.Database + case catalog.Table: + objectType = privilege.Table + case catalog.Type: + objectType = privilege.Type + case catalog.Schema: + objectType = privilege.Schema + } + if force { 
p.Descriptors().SkipValidationOnWrite() } @@ -415,7 +413,7 @@ func (p *planner) UnsafeUpsertNamespaceEntry( if invalid { return pgerror.Newf(pgcode.InvalidCatalogName, "invalid prefix (%d, %d) for %s %d", - parentID, parentSchemaID, desc.TypeName(), descID) + parentID, parentSchemaID, desc.DescriptorType(), descID) } return nil } @@ -569,7 +567,7 @@ func (p *planner) UnsafeDeleteDescriptor(ctx context.Context, descID int64, forc mut, err := p.Descriptors().GetMutableDescriptorByID(ctx, id, p.txn) var forceNoticeString string // for the event if err != nil { - if !errors.Is(err, catalog.ErrDescriptorNotFound) && force { + if force { notice := pgnotice.NewWithSeverityf("WARNING", "failed to retrieve existing descriptor, continuing with force flag: %v", err) p.BufferClientNotice(ctx, notice) diff --git a/pkg/sql/reparent_database.go b/pkg/sql/reparent_database.go index 08dba56b8545..cd3d81576c71 100644 --- a/pkg/sql/reparent_database.go +++ b/pkg/sql/reparent_database.go @@ -86,7 +86,7 @@ func (p *planner) ReparentDatabase( return nil, err } if exists { - return nil, pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", db.Name) + return nil, sqlerrors.NewSchemaAlreadyExistsError(db.Name) } // Ensure the database has a valid schema name. @@ -114,25 +114,25 @@ func (n *reparentDatabaseNode) startExec(params runParams) error { if err != nil { return err } - schema := schemadesc.NewCreatedMutable(descpb.SchemaDescriptor{ + schema := schemadesc.NewBuilder(&descpb.SchemaDescriptor{ ParentID: n.newParent.ID, Name: n.db.Name, ID: id, Privileges: protoutil.Clone(n.db.Privileges).(*descpb.PrivilegeDescriptor), Version: 1, - }) + }).BuildCreatedMutable() // Add the new schema to the parent database's name map. if n.newParent.Schemas == nil { n.newParent.Schemas = make(map[string]descpb.DatabaseDescriptor_SchemaInfo) } n.newParent.Schemas[n.db.Name] = descpb.DatabaseDescriptor_SchemaInfo{ - ID: schema.ID, + ID: schema.GetID(), Dropped: false, } if err := p.createDescriptorWithID( ctx, - catalogkeys.NewSchemaKey(n.newParent.ID, schema.Name).Key(p.ExecCfg().Codec), + catalogkeys.NewSchemaKey(n.newParent.ID, schema.GetName()).Key(p.ExecCfg().Codec), id, schema, params.ExecCfg().Settings, @@ -213,7 +213,7 @@ func (n *reparentDatabaseNode) startExec(params runParams) error { Name: tbl.Name, }) tbl.ParentID = n.newParent.ID - tbl.UnexposedParentSchemaID = schema.ID + tbl.UnexposedParentSchemaID = schema.GetID() objKey := catalogkv.MakeObjectNameKey(ctx, p.ExecCfg().Settings, tbl.ParentID, tbl.GetParentSchemaID(), tbl.Name).Key(codec) b.CPut(objKey, tbl.ID, nil /* expected */) if err := p.writeSchemaChange(ctx, tbl, descpb.InvalidMutationID, tree.AsStringWithFQNames(n.n, params.Ann())); err != nil { @@ -253,7 +253,7 @@ func (n *reparentDatabaseNode) startExec(params runParams) error { Name: typ.Name, }) typ.ParentID = n.newParent.ID - typ.ParentSchemaID = schema.ID + typ.ParentSchemaID = schema.GetID() objKey := catalogkv.MakeObjectNameKey(ctx, p.ExecCfg().Settings, typ.ParentID, typ.ParentSchemaID, typ.Name).Key(codec) b.CPut(objKey, typ.ID, nil /* expected */) if err := p.writeTypeSchemaChange(ctx, typ, tree.AsStringWithFQNames(n.n, params.Ann())); err != nil { diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index 449364c14ecb..f35fe69524ad 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -722,7 +723,7 @@ func (p *planner) getTableAndIndex( return nil, nil, err } optIdx := idx.(*optIndex) - return tabledesc.NewExistingMutable(*optIdx.tab.desc.TableDesc()), optIdx.desc, nil + return tabledesc.NewBuilder(optIdx.tab.desc.TableDesc()).BuildExistingMutableTable(), optIdx.desc, nil } // expandTableGlob expands pattern into a list of objects represented @@ -829,17 +830,7 @@ func newInternalLookupCtxFromDescriptors( ) (*internalLookupCtx, error) { descriptors := make([]catalog.Descriptor, len(rawDescs)) for i := range rawDescs { - desc := &rawDescs[i] - switch t := desc.Union.(type) { - case *descpb.Descriptor_Database: - descriptors[i] = dbdesc.NewImmutable(*t.Database) - case *descpb.Descriptor_Table: - descriptors[i] = tabledesc.NewImmutable(*t.Table) - case *descpb.Descriptor_Type: - descriptors[i] = typedesc.NewImmutable(*t.Type) - case *descpb.Descriptor_Schema: - descriptors[i] = schemadesc.NewImmutable(*t.Schema) - } + descriptors[i] = catalogkv.NewBuilder(&rawDescs[i]).BuildImmutable() } lCtx := newInternalLookupCtx(ctx, descriptors, prefix, nil /* fallback */) if err := descs.HydrateGivenDescriptors(ctx, descriptors); err != nil { diff --git a/pkg/sql/row/expr_walker_test.go b/pkg/sql/row/expr_walker_test.go index 9243081c3cf6..9b5156415a55 100644 --- a/pkg/sql/row/expr_walker_test.go +++ b/pkg/sql/row/expr_walker_test.go @@ -39,10 +39,10 @@ func createAndIncrementSeqDescriptor( seqOpts descpb.TableDescriptor_SequenceOpts, db *kv.DB, ) catalog.TableDescriptor { - desc := tabledesc.NewImmutable(descpb.TableDescriptor{ + desc := tabledesc.NewBuilder(&descpb.TableDescriptor{ ID: descpb.ID(id), SequenceOpts: &seqOpts, - }) + }).BuildImmutableTable() seqValueKey := codec.SequenceKey(uint32(desc.GetID())) _, err := kv.IncrementValRetryable( ctx, db, seqValueKey, incrementBy) diff --git a/pkg/sql/rowenc/index_encoding_test.go b/pkg/sql/rowenc/index_encoding_test.go index caa1e2c2defb..3d696157816b 100644 --- a/pkg/sql/rowenc/index_encoding_test.go +++ b/pkg/sql/rowenc/index_encoding_test.go @@ -107,7 +107,7 @@ func makeTableDescForTest(test indexKeyTest) (catalog.TableDescriptor, catalog.T Type: secondaryType, }}, } - return tabledesc.NewImmutable(tableDesc), colMap + return tabledesc.NewBuilder(&tableDesc).BuildImmutableTable(), colMap } func decodeIndex( diff --git a/pkg/sql/rowexec/bulk_row_writer.go b/pkg/sql/rowexec/bulk_row_writer.go index 5ff1b408eafd..b3d1a0a0d415 100644 --- a/pkg/sql/rowexec/bulk_row_writer.go +++ b/pkg/sql/rowexec/bulk_row_writer.go @@ -60,7 +60,7 @@ func newBulkRowWriterProcessor( flowCtx: flowCtx, processorID: processorID, batchIdxAtomic: 0, - tableDesc: tabledesc.NewImmutable(spec.Table), + tableDesc: tabledesc.NewBuilder(&spec.Table).BuildImmutableTable(), spec: spec, input: input, output: output, diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index c27d7259092f..62e5e40979fc 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -46,7 +46,7 @@ func newColumnBackfiller( columnBackfillerMon := execinfra.NewMonitor(ctx, flowCtx.Cfg.BackfillerMonitor, "column-backfill-mon") cb := &columnBackfiller{ - desc: tabledesc.NewImmutable(spec.Table), + desc: tabledesc.NewBuilder(&spec.Table).BuildImmutableTable(), backfiller: backfiller{ name: 
"Column", filter: backfill.ColumnMutationFilter, diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index ed7013209230..a99f2832d095 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -85,7 +85,7 @@ func newIndexBackfiller( indexBackfillerMon := execinfra.NewMonitor(ctx, flowCtx.Cfg.BackfillerMonitor, "index-backfill-mon") ib := &indexBackfiller{ - desc: tabledesc.NewImmutable(spec.Table), + desc: tabledesc.NewBuilder(&spec.Table).BuildImmutableTable(), spec: spec, flowCtx: flowCtx, output: output, diff --git a/pkg/sql/rowexec/inverted_joiner.go b/pkg/sql/rowexec/inverted_joiner.go index 66ccdc7b8e26..c6f17bc95967 100644 --- a/pkg/sql/rowexec/inverted_joiner.go +++ b/pkg/sql/rowexec/inverted_joiner.go @@ -189,7 +189,7 @@ func newInvertedJoiner( return nil, errors.AssertionFailedf("unexpected inverted join type %s", spec.Type) } ij := &invertedJoiner{ - desc: tabledesc.NewImmutable(spec.Table), + desc: tabledesc.NewBuilder(&spec.Table).BuildImmutableTable(), input: input, inputTypes: input.OutputTypes(), prefixEqualityCols: spec.PrefixEqualityColumns, diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index c790e105c30d..c1befb899acb 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -204,7 +204,7 @@ func newJoinReader( return nil, errors.Errorf("unsupported joinReaderType") } jr := &joinReader{ - desc: tabledesc.NewImmutable(spec.Table), + desc: tabledesc.NewBuilder(&spec.Table).BuildImmutableTable(), maintainOrdering: spec.MaintainOrdering, input: input, lookupCols: lookupCols, diff --git a/pkg/sql/rowexec/scrub_tablereader.go b/pkg/sql/rowexec/scrub_tablereader.go index bd371067b49a..20afa37d39fa 100644 --- a/pkg/sql/rowexec/scrub_tablereader.go +++ b/pkg/sql/rowexec/scrub_tablereader.go @@ -79,7 +79,7 @@ func newScrubTableReader( indexIdx: int(spec.IndexIdx), } - tr.tableDesc = tabledesc.NewImmutable(spec.Table) + tr.tableDesc = tabledesc.NewBuilder(&spec.Table).BuildImmutableTable() tr.limitHint = execinfra.LimitHint(spec.LimitHint, post) if err := tr.Init( diff --git a/pkg/sql/rowexec/tablereader.go b/pkg/sql/rowexec/tablereader.go index cc9e1791a297..ca2694e47466 100644 --- a/pkg/sql/rowexec/tablereader.go +++ b/pkg/sql/rowexec/tablereader.go @@ -88,7 +88,7 @@ func newTableReader( tr.parallelize = spec.Parallelize && tr.limitHint == 0 tr.maxTimestampAge = time.Duration(spec.MaxTimestampAgeNanos) - tableDesc := tabledesc.NewImmutable(spec.Table) + tableDesc := tabledesc.NewBuilder(&spec.Table).BuildImmutableTable() virtualColumn := tabledesc.FindVirtualColumn(tableDesc, spec.VirtualColumn) cols := tableDesc.PublicColumns() if spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic { diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index 3d1f760a6369..2334be536eae 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -289,7 +289,7 @@ func newZigzagJoiner( // TODO(ajwerner): Utilize a cached copy of these tables. 
tables := make([]catalog.TableDescriptor, len(spec.Tables)) for i := range spec.Tables { - tables[i] = tabledesc.NewImmutable(spec.Tables[i]) + tables[i] = tabledesc.NewBuilder(&spec.Tables[i]).BuildImmutableTable() } leftColumnTypes := catalog.ColumnTypes(tables[0].PublicColumns()) rightColumnTypes := catalog.ColumnTypes(tables[1].PublicColumns()) diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index a3a1cc038c8c..f0f97168504e 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -534,17 +534,8 @@ func (sc *SchemaChanger) notFirstInLine(ctx context.Context, desc catalog.Descri func (sc *SchemaChanger) getTargetDescriptor(ctx context.Context) (catalog.Descriptor, error) { // Retrieve the descriptor that is being changed. var desc catalog.Descriptor - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - var err error - desc, err = catalogkv.GetDescriptorByID( - ctx, - txn, - sc.execCfg.Codec, - sc.descID, - catalogkv.Immutable, - catalogkv.AnyDescriptorKind, - true, /* required */ - ) + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + desc, err = catalogkv.MustGetDescriptorByID(ctx, txn, sc.execCfg.Codec, sc.descID) return err }); err != nil { return nil, err diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 805f82252e1b..101839fcd19e 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -458,7 +458,8 @@ func TestRollbackOfAddingTable(t *testing.T) { require.NoError(t, row.Scan(&descBytes)) var desc descpb.Descriptor require.NoError(t, protoutil.Unmarshal(descBytes, &desc)) - viewDesc := desc.GetTable() //nolint:descriptormarshal + //nolint:descriptormarshal + viewDesc := desc.GetTable() require.Equal(t, "v", viewDesc.GetName(), "read a different descriptor than expected") require.Equal(t, descpb.DescriptorState_DROP, viewDesc.GetState()) diff --git a/pkg/sql/schemachanger/scexec/executor_external_test.go b/pkg/sql/schemachanger/scexec/executor_external_test.go index 67aba6af8414..513af005688e 100644 --- a/pkg/sql/schemachanger/scexec/executor_external_test.go +++ b/pkg/sql/schemachanger/scexec/executor_external_test.go @@ -81,8 +81,7 @@ func TestExecutorDescriptorMutationOps(t *testing.T) { var table *tabledesc.Mutable makeTable := func(f func(mutable *tabledesc.Mutable)) func() catalog.TableDescriptor { return func() catalog.TableDescriptor { - cpy := tabledesc.NewExistingMutable( - *table.ImmutableCopy().(catalog.TableDescriptor).TableDesc()) + cpy := tabledesc.NewBuilder(table.TableDesc()).BuildExistingMutableTable() if f != nil { f(cpy) } diff --git a/pkg/sql/scrub_fk.go b/pkg/sql/scrub_fk.go index fd856a6004d0..3a002b25eac5 100644 --- a/pkg/sql/scrub_fk.go +++ b/pkg/sql/scrub_fk.go @@ -53,7 +53,7 @@ func newSQLForeignKeyCheckOperation( tableName: tableName, tableDesc: tableDesc, constraint: &constraint, - referencedTableDesc: tabledesc.NewImmutable(*constraint.ReferencedTable), + referencedTableDesc: tabledesc.NewBuilder(constraint.ReferencedTable).BuildImmutableTable(), asOf: asOf, } } diff --git a/pkg/sql/set_schema.go b/pkg/sql/set_schema.go index 4d2142c884ff..d1d2a0e13dea 100644 --- a/pkg/sql/set_schema.go +++ b/pkg/sql/set_schema.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" - "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" + 
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // prepareSetSchema verifies that a table/type can be set to the desired @@ -30,8 +30,12 @@ func (p *planner) prepareSetSchema( ctx context.Context, desc catalog.MutableDescriptor, schema string, ) (descpb.ID, error) { + var objectName tree.ObjectName switch t := desc.(type) { - case *tabledesc.Mutable, *typedesc.Mutable: + case *tabledesc.Mutable: + objectName = tree.NewUnqualifiedTableName(tree.Name(desc.GetName())) + case *typedesc.Mutable: + objectName = tree.NewUnqualifiedTypeName(tree.Name(desc.GetName())) default: return 0, pgerror.Newf( pgcode.InvalidParameterValue, @@ -74,15 +78,9 @@ func (p *planner) prepareSetSchema( return desiredSchemaID, nil } - exists, id, err := catalogkv.LookupObjectID( - ctx, p.txn, p.ExecCfg().Codec, databaseID, desiredSchemaID, desc.GetName(), - ) - if err == nil && exists { - collidingDesc, err := catalogkv.GetAnyDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id, catalogkv.Immutable) - if err != nil { - return 0, sqlerrors.WrapErrorWhileConstructingObjectAlreadyExistsErr(err) - } - return 0, sqlerrors.MakeObjectAlreadyExistsError(collidingDesc.DescriptorProto(), desc.GetName()) + err = catalogkv.CheckObjectCollision(ctx, p.txn, p.ExecCfg().Codec, databaseID, desiredSchemaID, objectName) + if err != nil { + return descpb.InvalidID, err } return desiredSchemaID, nil diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index 04f926f818ba..314ae241280b 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -25,7 +25,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -247,7 +246,7 @@ func checkPrivilegeForSetZoneConfig(ctx context.Context, p *planner, zs tree.Zon return pgerror.Newf(pgcode.InsufficientPrivilege, "user %s does not have %s or %s privilege on %s %s", - p.SessionData().User(), privilege.ZONECONFIG, privilege.CREATE, dbDesc.TypeName(), dbDesc.GetName()) + p.SessionData().User(), privilege.ZONECONFIG, privilege.CREATE, dbDesc.DescriptorType(), dbDesc.GetName()) } tableDesc, err := p.resolveTableForZone(ctx, &zs) if err != nil { @@ -270,7 +269,7 @@ func checkPrivilegeForSetZoneConfig(ctx context.Context, p *planner, zs tree.Zon return pgerror.Newf(pgcode.InsufficientPrivilege, "user %s does not have %s or %s privilege on %s %s", - p.SessionData().User(), privilege.ZONECONFIG, privilege.CREATE, tableDesc.TypeName(), tableDesc.GetName()) + p.SessionData().User(), privilege.ZONECONFIG, privilege.CREATE, tableDesc.DescriptorType(), tableDesc.GetName()) } // setZoneConfigRun contains the run-time state of setZoneConfigNode during local execution. @@ -960,21 +959,14 @@ func RemoveIndexZoneConfigs( ctx context.Context, txn *kv.Txn, execCfg *ExecutorConfig, - tableID descpb.ID, + tableDesc catalog.TableDescriptor, indexDescs []descpb.IndexDescriptor, ) error { if !execCfg.Codec.ForSystemTenant() { // Tenants are agnostic to zone configs. 
return nil } - desc, err := catalogkv.GetDescriptorByID(ctx, txn, execCfg.Codec, tableID, - catalogkv.Mutable, catalogkv.TableDescriptorKind, true) - if err != nil { - return err - } - tableDesc := desc.(catalog.TableDescriptor) - - zone, err := getZoneConfigRaw(ctx, txn, execCfg.Codec, tableID) + zone, err := getZoneConfigRaw(ctx, txn, execCfg.Codec, tableDesc.GetID()) if err != nil { return err } @@ -988,7 +980,7 @@ func RemoveIndexZoneConfigs( } // Ignore CCL required error to allow schema change to progress. - _, err = writeZoneConfig(ctx, txn, tableID, tableDesc, zone, execCfg, false /* hasNewSubzones */) + _, err = writeZoneConfig(ctx, txn, tableDesc.GetID(), tableDesc, zone, execCfg, false /* hasNewSubzones */) if err != nil && !sqlerrors.IsCCLRequiredError(err) { return err } diff --git a/pkg/sql/show_create.go b/pkg/sql/show_create.go index 3670d0001b15..418d4267ea44 100644 --- a/pkg/sql/show_create.go +++ b/pkg/sql/show_create.go @@ -256,6 +256,11 @@ func (p *planner) ShowCreate( if lErr != nil { return "", lErr } + // Overwrite desc with hydrated descriptor. + desc, err = lCtx.getTableByID(desc.GetID()) + if err != nil { + return "", err + } stmt, err = ShowCreateTable(ctx, p, &tn, dbPrefix, desc, lCtx, displayOptions) } diff --git a/pkg/sql/sqlerrors/errors.go b/pkg/sql/sqlerrors/errors.go index f700a4f646b6..e3d31b892bf3 100644 --- a/pkg/sql/sqlerrors/errors.go +++ b/pkg/sql/sqlerrors/errors.go @@ -132,6 +132,11 @@ func NewDatabaseAlreadyExistsError(name string) error { return pgerror.Newf(pgcode.DuplicateDatabase, "database %q already exists", name) } +// NewSchemaAlreadyExistsError creates an error for a preexisting schema. +func NewSchemaAlreadyExistsError(name string) error { + return pgerror.Newf(pgcode.DuplicateSchema, "schema %q already exists", name) +} + // WrapErrorWhileConstructingObjectAlreadyExistsErr is used to wrap an error // when an error occurs while trying to get the colliding object for an // ObjectAlreadyExistsErr. @@ -150,8 +155,7 @@ func MakeObjectAlreadyExistsError(collidingObject *descpb.Descriptor, name strin case *descpb.Descriptor_Database: return NewDatabaseAlreadyExistsError(name) case *descpb.Descriptor_Schema: - // TODO(ajwerner): Add a case for an existing schema object. 
- return errors.AssertionFailedf("schema exists with name %v", name) + return NewSchemaAlreadyExistsError(name) default: return errors.AssertionFailedf("unknown type %T exists with name %v", collidingObject.Union, name) } diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index 2e527f35e474..3f668260c37e 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -417,14 +417,9 @@ CREATE TABLE test.tt (x test.t); desc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "tt") typLookup := func(ctx context.Context, id descpb.ID) (tree.TypeName, catalog.TypeDescriptor, error) { var typeDesc catalog.TypeDescriptor - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := catalogkv.GetDescriptorByID(ctx, txn, keys.SystemSQLCodec, id, - catalogkv.Immutable, catalogkv.TypeDescriptorKind, true /* required */) - if err != nil { - return err - } - typeDesc = desc.(catalog.TypeDescriptor) - return nil + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + typeDesc, err = catalogkv.MustGetTypeDescByID(ctx, txn, keys.SystemSQLCodec, id) + return err }); err != nil { return tree.TypeName{}, nil, err } diff --git a/pkg/sql/tests/repair_test.go b/pkg/sql/tests/repair_test.go index fd9e8bac2eb3..84a64fe386af 100644 --- a/pkg/sql/tests/repair_test.go +++ b/pkg/sql/tests/repair_test.go @@ -103,7 +103,7 @@ func TestDescriptorRepairOrphanedDescriptors(t *testing.T) { // the dance of adding back a parent database in order to drop the table. require.NoError(t, crdb.ExecuteTx(ctx, db, nil, func(tx *gosql.Tx) error { if _, err := tx.Exec( - "SELECT crdb_internal.unsafe_delete_descriptor($1);", + "SELECT crdb_internal.unsafe_delete_descriptor($1, true);", descID); err != nil { return err } @@ -633,9 +633,9 @@ SELECT crdb_internal.unsafe_upsert_descriptor(59, crdb_internal.json_to_pb('cock "owner_proto": "root", "users": [ { "privileges": 2, "user_proto": "admin" }, - { "privileges": 2, "user_proto": "root" }, { "privileges": 2, "user_proto": "newuser1" }, - { "privileges": 2, "user_proto": "newuser2" } + { "privileges": 2, "user_proto": "newuser2" }, + { "privileges": 2, "user_proto": "root" } ], "version": 1 }, diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index 722a20ba3881..5bad1b35ea30 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -204,14 +204,9 @@ func (TypeSchemaChangerTestingKnobs) ModuleTestingKnobs() {} func (t *typeSchemaChanger) getTypeDescFromStore(ctx context.Context) (*typedesc.Immutable, error) { var typeDesc *typedesc.Immutable - if err := t.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := catalogkv.GetDescriptorByID(ctx, txn, t.execCfg.Codec, t.typeID, - catalogkv.Immutable, catalogkv.TypeDescriptorKind, true /* required */) - if err != nil { - return err - } - typeDesc = desc.(*typedesc.Immutable) - return nil + if err := t.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + typeDesc, err = catalogkv.MustGetTypeDescByID(ctx, txn, t.execCfg.Codec, t.typeID) + return err }); err != nil { return nil, err } diff --git a/pkg/sql/virtual_schema.go b/pkg/sql/virtual_schema.go index b92d1e7ab187..1b674fbf2d15 100644 --- a/pkg/sql/virtual_schema.go +++ b/pkg/sql/virtual_schema.go @@ -334,7 +334,7 @@ func (v *virtualSchemaEntry) GetObjectByName( if def, ok := v.defs[name]; ok { if flags.RequireMutable { return &mutableVirtualDefEntry{ - desc: tabledesc.NewExistingMutable(*def.desc.TableDesc()), + desc: 
tabledesc.NewBuilder(def.desc.TableDesc()).BuildExistingMutableTable(), }, nil } return def, nil @@ -652,7 +652,7 @@ func NewVirtualSchemaHolder( return nil, errors.NewAssertionErrorWithWrappedErrf(err, "programmer error") } } - td := tabledesc.NewImmutable(tableDesc) + td := tabledesc.NewBuilder(&tableDesc).BuildImmutableTable() if err := catalog.ValidateSelf(td); err != nil { return nil, errors.NewAssertionErrorWithWrappedErrf(err, "failed to validate virtual table %s: programmer error", errors.Safe(td.GetName())) @@ -694,12 +694,12 @@ var publicSelectPrivileges = descpb.NewPrivilegeDescriptor( ) func initVirtualDatabaseDesc(id descpb.ID, name string) *dbdesc.Immutable { - return dbdesc.NewImmutable(descpb.DatabaseDescriptor{ + return dbdesc.NewBuilder(&descpb.DatabaseDescriptor{ Name: name, ID: id, Version: 1, Privileges: publicSelectPrivileges, - }) + }).BuildImmutableDatabase() } // getEntries is part of the VirtualTabler interface. diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index 4f040fbf86b7..0dd9fc433002 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -93,7 +93,8 @@ func getZoneConfig( if err := descVal.GetProto(&desc); err != nil { return 0, nil, 0, nil, err } - if tableDesc := descpb.TableFromDescriptor(&desc, descVal.Timestamp); tableDesc != nil { + tableDesc, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, descVal.Timestamp) + if tableDesc != nil { // This is a table descriptor. Look up its parent database zone config. dbID, zone, _, _, err := getZoneConfig(config.SystemTenantObjectID(tableDesc.ParentID), getKey, false /* getInheritedDefault */) if err != nil { @@ -139,7 +140,8 @@ func completeZoneConfig( if err := descVal.GetProto(&desc); err != nil { return err } - if tableDesc := descpb.TableFromDescriptor(&desc, descVal.Timestamp); tableDesc != nil { + tableDesc, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(&desc, descVal.Timestamp) + if tableDesc != nil { _, dbzone, _, _, err := getZoneConfig(config.SystemTenantObjectID(tableDesc.ParentID), getKey, false /* getInheritedDefault */) if err != nil { return err diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index a647840be45c..c143cddaf3db 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -73,8 +73,9 @@ func waitForConfigChange(t testing.TB, s *server.TestServer) *config.SystemConfi if err := val.GetProto(&foundDesc); err != nil { t.Fatal(err) } - if id := foundDesc.GetDatabase().GetID(); id != configID { - return errors.Errorf("expected database id %d; got %d", configID, id) + _, db, _, _ := descpb.FromDescriptor(&foundDesc) + if db.ID != configID { + return errors.Errorf("expected database id %d; got %d", configID, db.ID) } return nil } diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index d2a09510947b..bed286bb12cb 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -763,7 +763,8 @@ func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error { if err != nil { return err } - descpb.TableFromDescriptor(deprecatedDesc, ts).Name = systemschema.DeprecatedNamespaceTable.GetName() + deprecatedTable, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(deprecatedDesc, ts) + deprecatedTable.Name = systemschema.DeprecatedNamespaceTable.GetName() b.Put(deprecatedKey, deprecatedDesc) // The 19.2 namespace table contains an entry for "namespace" which maps to diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go index 
d6e669996ba0..b23fcf48cb93 100644 --- a/pkg/sqlmigrations/migrations_test.go +++ b/pkg/sqlmigrations/migrations_test.go @@ -545,7 +545,7 @@ func TestCreateSystemTable(t *testing.T) { defer leaktest.AfterTest(t)() ctx := context.Background() - table := tabledesc.NewExistingMutable(*systemschema.NamespaceTable.TableDesc()) + table := tabledesc.NewBuilder(systemschema.NamespaceTable.TableDesc()).BuildExistingMutableTable() table.ID = keys.MaxReservedDescID prevPrivileges, ok := descpb.SystemAllowedPrivileges[table.ID] @@ -798,7 +798,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { { ts, err := txn.GetProtoTs(ctx, key, desc) require.NoError(t, err) - table := descpb.TableFromDescriptor(desc, ts) + table, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(desc, ts) table.CreateAsOfTime = systemschema.NamespaceTable.GetCreateAsOfTime() table.ModificationTime = systemschema.NamespaceTable.GetModificationTime() require.True(t, table.Equal(systemschema.NamespaceTable.TableDesc())) @@ -806,7 +806,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { { ts, err := txn.GetProtoTs(ctx, deprecatedKey, desc) require.NoError(t, err) - table := descpb.TableFromDescriptor(desc, ts) + table, _, _, _ := descpb.FromDescriptorWithMVCCTimestamp(desc, ts) table.CreateAsOfTime = systemschema.DeprecatedNamespaceTable.GetCreateAsOfTime() table.ModificationTime = systemschema.DeprecatedNamespaceTable.GetModificationTime() require.True(t, table.Equal(systemschema.DeprecatedNamespaceTable.TableDesc())) @@ -856,7 +856,7 @@ CREATE TABLE system.jobs ( require.Equal(t, oldPrimaryFamilyColumns, oldJobsTable.Families[0].ColumnNames) jobsTable := systemschema.JobsTable - systemschema.JobsTable = tabledesc.NewImmutable(*oldJobsTable.TableDesc()) + systemschema.JobsTable = tabledesc.NewBuilder(oldJobsTable.TableDesc()).BuildImmutableTable() defer func() { systemschema.JobsTable = jobsTable }() @@ -937,7 +937,7 @@ func TestVersionAlterSystemJobsAddSqllivenessColumnsAddNewSystemSqllivenessTable require.Equal(t, oldPrimaryFamilyColumns, oldJobsTable.Families[0].ColumnNames) jobsTable := systemschema.JobsTable - systemschema.JobsTable = tabledesc.NewImmutable(*oldJobsTable.TableDesc()) + systemschema.JobsTable = tabledesc.NewBuilder(oldJobsTable.TableDesc()).BuildImmutableTable() defer func() { systemschema.JobsTable = jobsTable }() diff --git a/pkg/testutils/lint/passes/forbiddenmethod/analyzers.go b/pkg/testutils/lint/passes/forbiddenmethod/analyzers.go index 318b1f2155c9..7fef0b5e0e3f 100644 --- a/pkg/testutils/lint/passes/forbiddenmethod/analyzers.go +++ b/pkg/testutils/lint/passes/forbiddenmethod/analyzers.go @@ -20,8 +20,8 @@ var descriptorMarshalOptions = Options{ Doc: `check for correct unmarshaling of descpb descriptors`, Package: "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb", Type: "Descriptor", - Method: "GetTable", - Hint: "see descpb.TableFromDescriptor()", + Method: "^Get(Table|Database|Type|Schema)$", + Hint: "see descpb.FromDescriptorWithMVCCTimestamp()", } var grpcClientConnCloseOptions = Options{ @@ -44,7 +44,7 @@ var grpcStatusWithDetailsOptions = Options{ } // DescriptorMarshalAnalyzer checks for correct unmarshaling of descpb -// descriptors by disallowing calls to (descpb.Descriptor).GetTable(). +// descriptors by disallowing calls to (descpb.Descriptor).GetTable() et al. var DescriptorMarshalAnalyzer = Analyzer(descriptorMarshalOptions) // GRPCClientConnCloseAnalyzer checks for calls to (*grpc.ClientConn).Close. 
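As an illustrative aside (not part of the patch): the analyzers.go hunk above widens Options.Method from the literal "GetTable" to the anchored pattern "^Get(Table|Database|Type|Schema)$", and the forbiddenmethod.go hunk that follows compiles it with regexp.MustCompile and matches candidate method names with MatchString, reporting the actual accessor name in the diagnostic. A minimal, self-contained sketch of that matching behaviour (package and variable names here are illustrative only):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same anchored pattern as the updated descriptorMarshalOptions.
	methodRe := regexp.MustCompile(`^Get(Table|Database|Type|Schema)$`)

	// Only the four raw descriptor accessors are flagged; the ^...$ anchors
	// keep unrelated methods such as GetTableName or GetID from matching.
	for _, name := range []string{"GetTable", "GetDatabase", "GetType", "GetSchema", "GetTableName", "GetID"} {
		fmt.Printf("%-12s flagged=%t\n", name, methodRe.MatchString(name))
	}
}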
diff --git a/pkg/testutils/lint/passes/forbiddenmethod/forbiddenmethod.go b/pkg/testutils/lint/passes/forbiddenmethod/forbiddenmethod.go index 210b8f9f4942..d786e22fb4f1 100644 --- a/pkg/testutils/lint/passes/forbiddenmethod/forbiddenmethod.go +++ b/pkg/testutils/lint/passes/forbiddenmethod/forbiddenmethod.go @@ -16,6 +16,7 @@ import ( "fmt" "go/ast" "go/types" + "regexp" "github.com/cockroachdb/cockroach/pkg/testutils/lint/passes/passesutil" "golang.org/x/tools/go/analysis" @@ -52,6 +53,7 @@ type Options struct { // Analyzer returns an Analyzer that vets against calls to the method // described in the provided Options. func Analyzer(options Options) *analysis.Analyzer { + methodRe := regexp.MustCompile(options.Method) return &analysis.Analyzer{ Name: options.PassName, Doc: options.Doc, @@ -91,7 +93,7 @@ func Analyzer(options Options) *analysis.Analyzer { }) } - if f.Pkg() == nil || f.Pkg().Path() != options.Package || f.Name() != options.Method { + if f.Pkg() == nil || f.Pkg().Path() != options.Package || !methodRe.MatchString(f.Name()) { return } if !isMethodForNamedType(f, options.Type) { @@ -103,7 +105,7 @@ func Analyzer(options Options) *analysis.Analyzer { } pass.Report(analysis.Diagnostic{ Pos: n.Pos(), - Message: fmt.Sprintf("Illegal call to %s.%s(), %s", options.Type, options.Method, options.Hint), + Message: fmt.Sprintf("Illegal call to %s.%s(), %s", options.Type, f.Name(), options.Hint), }) }) return nil, nil diff --git a/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/descmarshaltest/foo.go b/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/descmarshaltest/foo.go index 94e84af018c1..89e420e3f8d6 100644 --- a/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/descmarshaltest/foo.go +++ b/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/descmarshaltest/foo.go @@ -14,14 +14,38 @@ import "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" func F() { var d descpb.Descriptor - d.GetTable() // want `Illegal call to Descriptor.GetTable\(\), see descpb.TableFromDescriptor\(\)` + d.GetDatabase() // want `Illegal call to Descriptor.GetDatabase\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` + + //nolint:descriptormarshal + d.GetDatabase() + + //nolint:descriptormarshal + d.GetDatabase() + + d.GetTable() // want `Illegal call to Descriptor.GetTable\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` //nolint:descriptormarshal d.GetTable() - // nolint:descriptormarshal + //nolint:descriptormarshal d.GetTable() + d.GetType() // want `Illegal call to Descriptor.GetType\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` + + //nolint:descriptormarshal + d.GetType() + + //nolint:descriptormarshal + d.GetType() + + d.GetSchema() // want `Illegal call to Descriptor.GetSchema\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` + + //nolint:descriptormarshal + d.GetSchema() + + //nolint:descriptormarshal + d.GetSchema() + // nolint:descriptormarshal if t := d.GetTable(); t != nil { panic("foo") @@ -39,7 +63,7 @@ func F() { panic("foo") } - if t := d.GetTable(); t != // want `Illegal call to Descriptor.GetTable\(\), see descpb.TableFromDescriptor\(\)` + if t := d.GetTable(); t != // want `Illegal call to Descriptor.GetTable\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` // nolint:descriptormarshal nil { panic("foo") @@ -47,11 +71,11 @@ func F() { // It does not work to put the comment as an inline with the preamble to an // if statement. 
- if t := d.GetTable(); t != nil { // nolint:descriptormarshal // want `Illegal call to Descriptor.GetTable\(\), see descpb.TableFromDescriptor\(\)` + if t := d.GetTable(); t != nil { // nolint:descriptormarshal // want `Illegal call to Descriptor.GetTable\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` panic("foo") } - if t := d.GetTable(); t != nil { // want `Illegal call to Descriptor.GetTable\(\), see descpb.TableFromDescriptor\(\)` + if t := d.GetTable(); t != nil { // want `Illegal call to Descriptor.GetTable\(\), see descpb.FromDescriptorWithMVCCTimestamp\(\)` panic("foo") } } diff --git a/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb/test_descriptor_lint.go b/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb/test_descriptor_lint.go index 8ba32879fb40..3a27cd67c2dd 100644 --- a/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb/test_descriptor_lint.go +++ b/pkg/testutils/lint/passes/forbiddenmethod/testdata/src/github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb/test_descriptor_lint.go @@ -14,6 +14,24 @@ type Descriptor struct{} type TableDescriptor struct{} +type DatabaseDescriptor struct{} + +type TypeDescriptor struct{} + +type SchemaDescriptor struct{} + func (m *Descriptor) GetTable() *TableDescriptor { return nil } + +func (m *Descriptor) GetDatabase() *DatabaseDescriptor { + return nil +} + +func (m *Descriptor) GetType() *TypeDescriptor { + return nil +} + +func (m *Descriptor) GetSchema() *SchemaDescriptor { + return nil +}
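As a closing illustrative aside (not part of the diff), the two call shapes this change standardizes on are the multi-valued descpb.FromDescriptor / FromDescriptorWithMVCCTimestamp unwrappers in place of TableFromDescriptor and the raw GetTable-style accessors, and the tabledesc.NewBuilder(...).BuildImmutableTable() constructor in place of tabledesc.NewImmutable. A rough sketch under those assumptions; the helper name is invented, and the ordering of the last two return values (type, then schema) is inferred from the lint hint above rather than confirmed:

package sketch

import (
	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
)

// unwrapAndBuild shows the intended call shapes only; validation, hydration,
// and error handling are deliberately elided.
func unwrapAndBuild(desc *descpb.Descriptor) catalog.TableDescriptor {
	// FromDescriptor returns every descriptor variant at once; the call sites
	// above show the table first and the database second, and at most one of
	// the returned values is non-nil since Descriptor wraps a protobuf union.
	tbl, _, _, _ := descpb.FromDescriptor(desc)
	if tbl == nil {
		return nil // not a table descriptor
	}
	// The builder takes the protobuf by pointer and produces the immutable
	// catalog wrapper that tabledesc.NewImmutable(*tbl) used to return.
	return tabledesc.NewBuilder(tbl).BuildImmutableTable()
}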