diff --git a/api/client/proto/authservice.pb.go b/api/client/proto/authservice.pb.go index 386cd39818091..e5dc88a2ddab0 100644 --- a/api/client/proto/authservice.pb.go +++ b/api/client/proto/authservice.pb.go @@ -9523,6 +9523,14 @@ func (*PaginatedResource) XXX_OneofWrappers() []interface{} { // ListResourcesRequest defines a request to retrieve resources paginated. Only // one type of resource can be retrieved per request. +// +// NOTE: There are two paths this request can take: +// 1. ListResources: the more efficient path that retrieves resources by subset +// at a time defined by field 'Limit'. Does NOT de-duplicate matches. +// 2. listResourcesWithSort: the less efficient path that retrieves all resources +// upfront by falling back to the traditional GetXXX calls. Used when sorting (SortBy), +// total count of resources (NeedTotalCount), or ResourceType `KindKubernetesCluster` +// is requested. Matches are de-duplicated. type ListResourcesRequest struct { // ResourceType is the resource that is going to be retrieved. ResourceType string `protobuf:"bytes,1,opt,name=ResourceType,proto3" json:"resource_type,omitempty"` diff --git a/api/client/proto/authservice.proto b/api/client/proto/authservice.proto index ecce502c766e8..0555e6e4d4378 100644 --- a/api/client/proto/authservice.proto +++ b/api/client/proto/authservice.proto @@ -1484,6 +1484,14 @@ message PaginatedResource { // ListResourcesRequest defines a request to retrieve resources paginated. Only // one type of resource can be retrieved per request. +// +// NOTE: There are two paths this request can take: +// 1. ListResources: the more efficient path that retrieves resources by subset +// at a time defined by field 'Limit'. Does NOT de-duplicate matches. +// 2. listResourcesWithSort: the less efficient path that retrieves all resources +// upfront by falling back to the traditional GetXXX calls. Used when sorting (SortBy), +// total count of resources (NeedTotalCount), or ResourceType `KindKubernetesCluster` +// is requested. Matches are de-duplicated. message ListResourcesRequest { // ResourceType is the resource that is going to be retrieved. string ResourceType = 1 [ (gogoproto.jsontag) = "resource_type,omitempty" ]; diff --git a/api/types/app.go b/api/types/app.go index f9b4a276ef780..79d3c9c27a194 100644 --- a/api/types/app.go +++ b/api/types/app.go @@ -301,14 +301,17 @@ func (a *AppV3) CheckAndSetDefaults() error { return nil } -// DeduplicateApps deduplicates apps by name. +// DeduplicateApps deduplicates apps by combination of app name and public address. +// Apps can have the same name but also could have different addresses. 
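+// For example (illustrative app names and addresses): three registrations of app
+// "grafana", two pointing at "grafana.prod.example.com" and one at
+// "grafana.dev.example.com", collapse to one entry per unique (name, address) pair:
+//
+//	deduped := DeduplicateApps(apps) // 3 inputs -> 2 unique apps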
func DeduplicateApps(apps []Application) (result []Application) { - seen := make(map[string]struct{}) + type key struct{ name, addr string } + seen := make(map[key]struct{}) for _, app := range apps { - if _, ok := seen[app.GetName()]; ok { + key := key{app.GetName(), app.GetPublicAddr()} + if _, ok := seen[key]; ok { continue } - seen[app.GetName()] = struct{}{} + seen[key] = struct{}{} result = append(result, app) } return result diff --git a/api/types/desktop.go b/api/types/desktop.go index 8bc7d23cf15f2..b1a38d5448b0f 100644 --- a/api/types/desktop.go +++ b/api/types/desktop.go @@ -333,5 +333,4 @@ type ListWindowsDesktopsRequest struct { StartKey, PredicateExpression string Labels map[string]string SearchKeywords []string - SortBy SortBy } diff --git a/lib/auth/auth.go b/lib/auth/auth.go index 759813bf4946f..4dd8a508cff76 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -2856,7 +2856,6 @@ func (a *Server) IterateResourcePages(ctx context.Context, req proto.ListResourc PredicateExpression: req.PredicateExpression, Labels: req.Labels, SearchKeywords: req.SearchKeywords, - SortBy: req.SortBy, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index caffef7de5eff..87ef6ccbb529b 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -1004,7 +1004,7 @@ func (a *ServerWithRoles) ListResources(ctx context.Context, req proto.ListResou return false, trace.Wrap(err) } - switch match, err := services.MatchResourceByFilters(resource, filter); { + switch match, err := services.MatchResourceByFilters(resource, filter, nil /* ignore dup matches */); { case err != nil: return false, trace.Wrap(err) case match: @@ -1235,7 +1235,7 @@ func (a *ServerWithRoles) listResourcesWithSort(ctx context.Context, req proto.L } // Extract kube clusters into its own list. - clusters := []types.KubeCluster{} + var clusters []types.KubeCluster for _, svc := range kubeservices { for _, legacyCluster := range svc.GetKubernetesClusters() { cluster, err := types.NewKubernetesClusterV3FromLegacyCluster(svc.GetNamespace(), legacyCluster) @@ -1246,7 +1246,7 @@ func (a *ServerWithRoles) listResourcesWithSort(ctx context.Context, req proto.L } } - sortedClusters := types.KubeClusters(types.DeduplicateKubeClusters(clusters)) + sortedClusters := types.KubeClusters(clusters) if err := sortedClusters.SortByCustom(req.SortBy); err != nil { return nil, trace.Wrap(err) } diff --git a/lib/auth/auth_with_roles_test.go b/lib/auth/auth_with_roles_test.go index 94a423834ed82..1a8ed66a53943 100644 --- a/lib/auth/auth_with_roles_test.go +++ b/lib/auth/auth_with_roles_test.go @@ -2227,7 +2227,7 @@ func TestListResources_KindKubernetesCluster(t *testing.T) { testNames := []string{"a", "b", "c", "d"} - // Add some kube services. + // Add a kube service with 3 clusters. kubeService, err := types.NewServer("bar", types.KindKubeService, types.ServerSpecV2{ KubernetesClusters: []*types.KubernetesCluster{{Name: "d"}, {Name: "b"}, {Name: "a"}}, }) @@ -2235,7 +2235,8 @@ func TestListResources_KindKubernetesCluster(t *testing.T) { _, err = s.UpsertKubeServiceV2(ctx, kubeService) require.NoError(t, err) - // Include a duplicate cluster name to test deduplicate. + // Add a kube service with 2 clusters. + // Includes a duplicate cluster name to test deduplicate. 
kubeService, err = types.NewServer("foo", types.KindKubeService, types.ServerSpecV2{ KubernetesClusters: []*types.KubernetesCluster{{Name: "a"}, {Name: "c"}}, }) @@ -2258,7 +2259,8 @@ func TestListResources_KindKubernetesCluster(t *testing.T) { require.NoError(t, err) require.Len(t, res.Resources, len(testNames)) require.Empty(t, res.NextKey) - require.Empty(t, res.TotalCount) + // There is 2 kube services, but 4 unique clusters. + require.Equal(t, 4, res.TotalCount) clusters, err := types.ResourcesWithLabels(res.Resources).AsKubeClusters() require.NoError(t, err) @@ -2411,3 +2413,170 @@ func TestDeleteUserAppSessions(t *testing.T) { require.NoError(t, err) require.Len(t, sessions, 0) } + +func TestListResources_SortAndDeduplicate(t *testing.T) { + t.Parallel() + ctx := context.Background() + srv := newTestTLSServer(t) + + // Create user, role, and client. + username := "user" + user, role, err := CreateUserAndRole(srv.Auth(), username, nil) + require.NoError(t, err) + identity := TestUser(user.GetName()) + clt, err := srv.NewClient(identity) + require.NoError(t, err) + + // Permit user to get all resources. + role.SetWindowsDesktopLabels(types.Allow, types.Labels{types.Wildcard: {types.Wildcard}}) + require.NoError(t, srv.Auth().UpsertRole(ctx, role)) + + // Define some resource names for testing. + names := []string{"d", "b", "d", "a", "a", "b"} + uniqueNames := []string{"a", "b", "d"} + + tests := []struct { + name string + kind string + insertResources func() + wantNames []string + }{ + { + name: "KindDatabaseServer", + kind: types.KindDatabaseServer, + insertResources: func() { + for i := 0; i < len(names); i++ { + db, err := types.NewDatabaseServerV3(types.Metadata{ + Name: fmt.Sprintf("name-%v", i), + }, types.DatabaseServerSpecV3{ + HostID: "_", + Hostname: "_", + Database: &types.DatabaseV3{ + Metadata: types.Metadata{ + Name: names[i], + }, + Spec: types.DatabaseSpecV3{ + Protocol: "_", + URI: "_", + }, + }, + }) + require.NoError(t, err) + _, err = srv.Auth().UpsertDatabaseServer(ctx, db) + require.NoError(t, err) + } + }, + }, + { + name: "KindAppServer", + kind: types.KindAppServer, + insertResources: func() { + for i := 0; i < len(names); i++ { + server, err := types.NewAppServerV3(types.Metadata{ + Name: fmt.Sprintf("name-%v", i), + }, types.AppServerSpecV3{ + HostID: "_", + App: &types.AppV3{Metadata: types.Metadata{Name: names[i]}, Spec: types.AppSpecV3{URI: "_"}}, + }) + require.NoError(t, err) + _, err = srv.Auth().UpsertApplicationServer(ctx, server) + require.NoError(t, err) + } + }, + }, + { + name: "KindWindowsDesktop", + kind: types.KindWindowsDesktop, + insertResources: func() { + for i := 0; i < len(names); i++ { + desktop, err := types.NewWindowsDesktopV3(names[i], nil, types.WindowsDesktopSpecV3{ + Addr: "_", + HostID: fmt.Sprintf("name-%v", i), + }) + require.NoError(t, err) + require.NoError(t, srv.Auth().UpsertWindowsDesktop(ctx, desktop)) + } + }, + }, + { + name: "KindKubernetesCluster", + kind: types.KindKubernetesCluster, + insertResources: func() { + for i := 0; i < len(names); i++ { + server, err := types.NewServer(fmt.Sprintf("name-%v", i), types.KindKubeService, types.ServerSpecV2{ + KubernetesClusters: []*types.KubernetesCluster{ + // Test dedup inside this list as well as from each service. 
+ {Name: names[i]}, + {Name: names[i]}, + }, + }) + require.NoError(t, err) + _, err = srv.Auth().UpsertKubeServiceV2(ctx, server) + require.NoError(t, err) + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.insertResources() + + // Fetch all resources + fetchedResources := make([]types.ResourceWithLabels, 0, len(uniqueNames)) + resp, err := clt.ListResources(ctx, proto.ListResourcesRequest{ + ResourceType: tc.kind, + NeedTotalCount: true, + Limit: 2, + SortBy: types.SortBy{Field: types.ResourceMetadataName, IsDesc: true}, + }) + require.NoError(t, err) + require.Len(t, resp.Resources, 2) + require.Equal(t, len(uniqueNames), resp.TotalCount) + fetchedResources = append(fetchedResources, resp.Resources...) + + resp, err = clt.ListResources(ctx, proto.ListResourcesRequest{ + ResourceType: tc.kind, + NeedTotalCount: true, + StartKey: resp.NextKey, + Limit: 2, + SortBy: types.SortBy{Field: types.ResourceMetadataName, IsDesc: true}, + }) + require.NoError(t, err) + require.Len(t, resp.Resources, 1) + require.Equal(t, len(uniqueNames), resp.TotalCount) + fetchedResources = append(fetchedResources, resp.Resources...) + + r := types.ResourcesWithLabels(fetchedResources) + var extractedErr error + var extractedNames []string + + switch tc.kind { + case types.KindDatabaseServer: + s, err := r.AsDatabaseServers() + require.NoError(t, err) + extractedNames, extractedErr = types.DatabaseServers(s).GetFieldVals(types.ResourceMetadataName) + + case types.KindAppServer: + s, err := r.AsAppServers() + require.NoError(t, err) + extractedNames, extractedErr = types.AppServers(s).GetFieldVals(types.ResourceMetadataName) + + case types.KindWindowsDesktop: + s, err := r.AsWindowsDesktops() + require.NoError(t, err) + extractedNames, extractedErr = types.WindowsDesktops(s).GetFieldVals(types.ResourceMetadataName) + + default: + s, err := r.AsKubeClusters() + require.NoError(t, err) + require.Len(t, s, 3) + extractedNames, extractedErr = types.KubeClusters(s).GetFieldVals(types.ResourceMetadataName) + } + + require.NoError(t, extractedErr) + require.ElementsMatch(t, uniqueNames, extractedNames) + require.IsDecreasing(t, extractedNames) + }) + } +} diff --git a/lib/auth/grpcserver_test.go b/lib/auth/grpcserver_test.go index 42519a60a806c..367dd85188f35 100644 --- a/lib/auth/grpcserver_test.go +++ b/lib/auth/grpcserver_test.go @@ -2206,7 +2206,7 @@ func TestListResources(t *testing.T) { require.NoError(t, err) require.Len(t, resp.Resources, 2) require.Empty(t, resp.NextKey) - require.Empty(t, resp.TotalCount) + require.Equal(t, 2, resp.TotalCount) } // Test listing with NeedTotalCount flag. 
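// Reviewer sketch, not part of this diff: how a caller is expected to drive the
// sorted path described in the ListResourcesRequest NOTE above (SortBy +
// NeedTotalCount + StartKey paging). The lister interface, helper name, and
// package are assumptions for illustration; the request/response fields mirror
// the tests in auth_with_roles_test.go.
package example

import (
	"context"

	"github.com/gravitational/teleport/api/client/proto"
	"github.com/gravitational/teleport/api/types"
	"github.com/gravitational/trace"
)

// resourceLister is satisfied by the auth server used in the tests above.
type resourceLister interface {
	ListResources(ctx context.Context, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error)
}

// fetchAllSorted pages through every resource of the given kind, sorted by name
// descending, and also returns the deduplicated total count reported by the server.
func fetchAllSorted(ctx context.Context, clt resourceLister, kind string) ([]types.ResourceWithLabels, int, error) {
	req := proto.ListResourcesRequest{
		ResourceType:   kind,
		Limit:          2,
		NeedTotalCount: true,
		SortBy:         types.SortBy{Field: types.ResourceMetadataName, IsDesc: true},
	}
	var all []types.ResourceWithLabels
	total := 0
	for {
		resp, err := clt.ListResources(ctx, req)
		if err != nil {
			return nil, 0, trace.Wrap(err)
		}
		all = append(all, resp.Resources...)
		total = resp.TotalCount
		if resp.NextKey == "" {
			break
		}
		req.StartKey = resp.NextKey
	}
	return all, total, nil
}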
diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index e7c92b026ecbd..d4e1bc2c1e63d 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -701,19 +701,16 @@ func benchGetNodes(b *testing.B, nodeCount int) { ctx := context.Background() for i := 0; i < nodeCount; i++ { - func() { - server := suite.NewServer(types.KindNode, uuid.New().String(), "127.0.0.1:2022", apidefaults.Namespace) - _, err := p.presenceS.UpsertNode(ctx, server) - require.NoError(b, err) - timeout := time.NewTimer(time.Millisecond * 200) - defer timeout.Stop() - select { - case event := <-p.eventsC: - require.Equal(b, EventProcessed, event.Type) - case <-timeout.C: - b.Fatalf("timeout waiting for event, iteration=%d", i) - } - }() + server := suite.NewServer(types.KindNode, uuid.New().String(), "127.0.0.1:2022", apidefaults.Namespace) + _, err := p.presenceS.UpsertNode(ctx, server) + require.NoError(b, err) + + select { + case event := <-p.eventsC: + require.Equal(b, EventProcessed, event.Type) + case <-time.After(200 * time.Millisecond): + b.Fatalf("timeout waiting for event, iteration=%d", i) + } } b.ResetTimer() @@ -781,6 +778,66 @@ func benchListNodes(b *testing.B, nodeCount int, pageSize int) { } } +/* +goos: linux +goarch: amd64 +pkg: github.com/gravitational/teleport/lib/cache +cpu: Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz +BenchmarkListResourcesWithSort-8 1 2351035036 ns/op +*/ +func BenchmarkListResourcesWithSort(b *testing.B) { + p, err := newPack(b.TempDir(), ForAuth, memoryBackend(true)) + require.NoError(b, err) + defer p.Close() + + ctx := context.Background() + + count := 100000 + for i := 0; i < count; i++ { + server := suite.NewServer(types.KindNode, uuid.New().String(), "127.0.0.1:2022", apidefaults.Namespace) + // Set some static and dynamic labels. + server.Metadata.Labels = map[string]string{"os": "mac", "env": "prod", "country": "us", "tier": "frontend"} + server.Spec.CmdLabels = map[string]types.CommandLabelV2{ + "version": {Result: "v8"}, + "time": {Result: "now"}, + } + _, err := p.presenceS.UpsertNode(ctx, server) + require.NoError(b, err) + + select { + case event := <-p.eventsC: + require.Equal(b, EventProcessed, event.Type) + case <-time.After(200 * time.Millisecond): + b.Fatalf("timeout waiting for event, iteration=%d", i) + } + } + + b.ResetTimer() + + for _, limit := range []int32{100, 1_000, 10_000, 100_000} { + for _, totalCount := range []bool{true, false} { + b.Run(fmt.Sprintf("limit=%d,needTotal=%t", limit, totalCount), func(b *testing.B) { + for n := 0; n < b.N; n++ { + resp, err := p.cache.ListResources(ctx, proto.ListResourcesRequest{ + ResourceType: types.KindNode, + Namespace: apidefaults.Namespace, + SortBy: types.SortBy{ + IsDesc: true, + Field: types.ResourceSpecHostname, + }, + // Predicate is the more expensive filter. + PredicateExpression: `search("mac", "frontend") && labels.version == "v8"`, + Limit: limit, + NeedTotalCount: totalCount, + }) + require.NoError(b, err) + require.Len(b, resp.Resources, int(limit)) + } + }) + } + } +} + // TestListResources_NodesTTLVariant verifies that the custom ListNodes impl that we fallback to when // using ttl-based caching works as expected. 
func TestListResources_NodesTTLVariant(t *testing.T) { diff --git a/lib/services/local/desktops.go b/lib/services/local/desktops.go index 1eae39b1cc566..dd5efb995880f 100644 --- a/lib/services/local/desktops.go +++ b/lib/services/local/desktops.go @@ -199,7 +199,7 @@ func (s *WindowsDesktopService) ListWindowsDesktops(ctx context.Context, req typ continue } - switch match, err := services.MatchResourceByFilters(desktop, filter); { + switch match, err := services.MatchResourceByFilters(desktop, filter, nil /* ignore dup matches */); { case err != nil: return false, trace.Wrap(err) case match: diff --git a/lib/services/local/presence.go b/lib/services/local/presence.go index b92bd6e20f83d..49828bb65492f 100644 --- a/lib/services/local/presence.go +++ b/lib/services/local/presence.go @@ -1614,7 +1614,7 @@ func (s *PresenceService) listResources(ctx context.Context, req proto.ListResou return false, trace.Wrap(err) } - switch match, err := services.MatchResourceByFilters(resource, filter); { + switch match, err := services.MatchResourceByFilters(resource, filter, nil /* ignore dup matches */); { case err != nil: return false, trace.Wrap(err) case match: @@ -1685,6 +1685,30 @@ func (s *PresenceService) listResourcesWithSort(ctx context.Context, req proto.L } resources = servers.AsResources() + case types.KindKubernetesCluster: + kubeservices, err := s.GetKubeServices(ctx) + if err != nil { + return nil, trace.Wrap(err) + } + + // Extract kube clusters into its own list. + var clusters []types.KubeCluster + for _, svc := range kubeservices { + for _, legacyCluster := range svc.GetKubernetesClusters() { + cluster, err := types.NewKubernetesClusterV3FromLegacyCluster(svc.GetNamespace(), legacyCluster) + if err != nil { + return nil, trace.Wrap(err) + } + clusters = append(clusters, cluster) + } + } + + sortedClusters := types.KubeClusters(clusters) + if err := sortedClusters.SortByCustom(req.SortBy); err != nil { + return nil, trace.Wrap(err) + } + resources = sortedClusters.AsResources() + default: return nil, trace.NotImplemented("resource type %q is not supported for ListResourcesWithSort", req.ResourceType) } @@ -1693,37 +1717,13 @@ func (s *PresenceService) listResourcesWithSort(ctx context.Context, req proto.L } // FakePaginate is used when we are working with an entire list of resources upfront but still requires pagination. +// While applying filters, it will also deduplicate matches found. func FakePaginate(resources []types.ResourceWithLabels, req proto.ListResourcesRequest) (*types.ListResourcesResponse, error) { if err := req.CheckAndSetDefaults(); err != nil { return nil, trace.Wrap(err) } - // filterAll is a flag to continue matching even after we found limit, - // to determine the total count. - filterAll := req.NeedTotalCount - - // Trim resources that precede start key, except when total count - // was requested, then we have to filter all to get accurate count. - pageStart := 0 - if req.StartKey != "" { - for i, resource := range resources { - if backend.GetPaginationKey(resource) == req.StartKey { - pageStart = i - break - } - } - - if !filterAll { - resources = resources[pageStart:] - } - } - - // Iterate and filter resources, finding match up to limit+1 (+1 to determine next key), - // and if total count is not required, we halt matching when we reach page limit, else - // we continue to match (but not include into list of filtered) to determine the total count. 
- matchCount := 0 limit := int(req.Limit) - var nextKey string var filtered []types.ResourceWithLabels filter := services.MatchResourceFilter{ ResourceKind: req.ResourceType, @@ -1732,40 +1732,45 @@ func FakePaginate(resources []types.ResourceWithLabels, req proto.ListResourcesR PredicateExpression: req.PredicateExpression, } - for currIndex, resource := range resources { - switch match, err := services.MatchResourceByFilters(resource, filter); { + // Iterate and filter every resource, deduplicating while matching. + seenResourceMap := make(map[services.ResourceSeenKey]struct{}) + for _, resource := range resources { + switch match, err := services.MatchResourceByFilters(resource, filter, seenResourceMap); { case err != nil: return nil, trace.Wrap(err) case !match: continue } - matchCount++ - if filterAll && nextKey != "" { - continue - } + filtered = append(filtered, resource) + } + + totalCount := len(filtered) + pageStart := 0 + pageEnd := limit - if len(filtered) == limit { - nextKey = backend.GetPaginationKey(resource) - if !filterAll { + // Trim resources that precede start key. + if req.StartKey != "" { + for i, resource := range filtered { + if backend.GetPaginationKey(resource) == req.StartKey { + pageStart = i break } - continue - } - - if !filterAll || currIndex >= pageStart { - filtered = append(filtered, resource) } + pageEnd = limit + pageStart } - if !filterAll { - matchCount = 0 + var nextKey string + if pageEnd >= len(filtered) { + pageEnd = len(filtered) + } else { + nextKey = backend.GetPaginationKey(filtered[pageEnd]) } return &types.ListResourcesResponse{ - Resources: filtered, + Resources: filtered[pageStart:pageEnd], NextKey: nextKey, - TotalCount: matchCount, + TotalCount: totalCount, }, nil } diff --git a/lib/services/local/presence_test.go b/lib/services/local/presence_test.go index 88c3e91592d0a..ed534ca15f987 100644 --- a/lib/services/local/presence_test.go +++ b/lib/services/local/presence_test.go @@ -725,65 +725,6 @@ func TestListResources(t *testing.T) { return resultResourcesWithMatchExprsLen == totalWithLabels }, time.Second, 100*time.Millisecond) - // Test sorting by metadata.name, since not all resources support sorting: - sortBy := types.SortBy{Field: types.ResourceMetadataName, IsDesc: true} - var sortedResources []types.ResourceWithLabels - - switch test.resourceType { - case types.KindNode, types.KindAppServer, types.KindDatabaseServer: - // Test NeedTotalCount flag. - res, err := presence.ListResources(ctx, proto.ListResourcesRequest{ - ResourceType: test.resourceType, - NeedTotalCount: true, - Limit: 1, - }) - require.NoError(t, err) - require.Len(t, res.Resources, 1) - require.NotEmpty(t, res.NextKey) - require.Equal(t, totalResources, res.TotalCount) - - // Test sorting. - require.Eventually(t, func() bool { - resp, err = presence.ListResources(ctx, proto.ListResourcesRequest{ - Limit: int32(resourcesPerPage), - Namespace: apidefaults.Namespace, - ResourceType: test.resourceType, - StartKey: resp.NextKey, - SortBy: sortBy, - }) - require.NoError(t, err) - require.Empty(t, resp.TotalCount) - - sortedResources = append(sortedResources, resp.Resources...) - if len(sortedResources) == totalResources { - require.Empty(t, resp.NextKey) - } - return len(sortedResources) == totalResources - }, time.Second, 100*time.Millisecond) - } - - // Test sorted resources are in the correct direction. 
- switch test.resourceType { - case types.KindNode: - servers, err := types.ResourcesWithLabels(sortedResources).AsServers() - require.NoError(t, err) - fieldVals, err := types.Servers(servers).GetFieldVals(sortBy.Field) - require.NoError(t, err) - require.IsDecreasing(t, fieldVals) - case types.KindAppServer: - servers, err := types.ResourcesWithLabels(sortedResources).AsAppServers() - require.NoError(t, err) - fieldVals, err := types.AppServers(servers).GetFieldVals(sortBy.Field) - require.NoError(t, err) - require.IsDecreasing(t, fieldVals) - case types.KindDatabaseServer: - servers, err := types.ResourcesWithLabels(sortedResources).AsDatabaseServers() - require.NoError(t, err) - fieldVals, err := types.DatabaseServers(servers).GetFieldVals(sortBy.Field) - require.NoError(t, err) - require.IsDecreasing(t, fieldVals) - } - // delete everything err = test.deleteAllResourcesFunc(ctx, presence) require.NoError(t, err) @@ -899,7 +840,6 @@ func TestListResources_Helpers(t *testing.T) { resp, err := tc.fetch(req) require.NoError(t, err) require.Empty(t, resp.NextKey) - require.Empty(t, resp.TotalCount) fetchedNodes, err := types.ResourcesWithLabels(resp.Resources).AsServers() require.NoError(t, err) @@ -921,7 +861,6 @@ func TestListResources_Helpers(t *testing.T) { }) require.NoError(t, err) require.Len(t, resp.Resources, 10) - require.Empty(t, resp.TotalCount) fetchedNodes, err := types.ResourcesWithLabels(resp.Resources).AsServers() require.NoError(t, err) @@ -937,7 +876,6 @@ func TestListResources_Helpers(t *testing.T) { }) require.NoError(t, err) require.Len(t, resp.Resources, 5) - require.Empty(t, resp.TotalCount) fetchedNodes, err = types.ResourcesWithLabels(resp.Resources).AsServers() require.NoError(t, err) @@ -945,7 +883,7 @@ func TestListResources_Helpers(t *testing.T) { require.Equal(t, backend.GetPaginationKey(nodes[15]), resp.NextKey) // 16th item // Last fetch. - resp, err = presence.listResources(ctx, proto.ListResourcesRequest{ + resp, err = tc.fetch(proto.ListResourcesRequest{ ResourceType: types.KindNode, Namespace: namespace, StartKey: resp.NextKey, @@ -953,7 +891,6 @@ func TestListResources_Helpers(t *testing.T) { }) require.NoError(t, err) require.Len(t, resp.Resources, 5) - require.Empty(t, resp.TotalCount) fetchedNodes, err = types.ResourcesWithLabels(resp.Resources).AsServers() require.NoError(t, err) @@ -981,7 +918,6 @@ func TestListResources_Helpers(t *testing.T) { require.Len(t, resp.Resources, 1) require.Equal(t, targetVal, resp.Resources[0].GetName()) require.Empty(t, resp.NextKey) - require.Empty(t, resp.TotalCount) }) } }) @@ -1177,3 +1113,120 @@ func TestPresenceService_CancelSemaphoreLease(t *testing.T) { require.Len(t, semaphores, 1) require.Empty(t, semaphores[0].LeaseRefs()) } + +// TestListResources_DuplicateResourceFilterByLabel tests that we can search for a specific label +// among duplicated resources, and once a match is found, excludes duplicated matches from the result. +func TestListResources_DuplicateResourceFilterByLabel(t *testing.T) { + t.Parallel() + ctx := context.Background() + + backend, err := lite.NewWithConfig(ctx, lite.Config{ + Path: t.TempDir(), + Clock: clockwork.NewFakeClock(), + }) + require.NoError(t, err) + + presence := NewPresenceService(backend) + + // Same resource name, but have different labels. 
+ names := []string{"a", "a", "a", "a"} + labels := []map[string]string{ + {"env": "prod"}, + {"env": "dev"}, + {"env": "qa"}, + {"env": "dev"}, + } + + tests := []struct { + name string + kind string + insertResources func() + wantNames []string + }{ + { + name: "KindDatabaseServer", + kind: types.KindDatabaseServer, + insertResources: func() { + for i := 0; i < len(names); i++ { + db, err := types.NewDatabaseServerV3(types.Metadata{ + Name: fmt.Sprintf("name-%v", i), + }, types.DatabaseServerSpecV3{ + HostID: "_", + Hostname: "_", + Database: &types.DatabaseV3{ + Metadata: types.Metadata{ + Name: names[i], + Labels: labels[i], + }, + Spec: types.DatabaseSpecV3{ + Protocol: "_", + URI: "_", + }, + }, + }) + require.NoError(t, err) + _, err = presence.UpsertDatabaseServer(ctx, db) + require.NoError(t, err) + } + }, + }, + { + name: "KindAppServer", + kind: types.KindAppServer, + insertResources: func() { + for i := 0; i < len(names); i++ { + server, err := types.NewAppServerV3(types.Metadata{ + Name: fmt.Sprintf("name-%v", i), + }, types.AppServerSpecV3{ + HostID: "_", + App: &types.AppV3{ + Metadata: types.Metadata{ + Name: names[i], + Labels: labels[i], + }, + Spec: types.AppSpecV3{URI: "_"}}, + }) + require.NoError(t, err) + _, err = presence.UpsertApplicationServer(ctx, server) + require.NoError(t, err) + } + }, + }, + { + name: "KindKubernetesCluster", + kind: types.KindKubernetesCluster, + insertResources: func() { + for i := 0; i < len(names); i++ { + server, err := types.NewServer(fmt.Sprintf("name-%v", i), types.KindKubeService, types.ServerSpecV2{ + KubernetesClusters: []*types.KubernetesCluster{ + // Test dedup inside this list as well as from each service. + {Name: names[i], StaticLabels: labels[i]}, + {Name: names[i], StaticLabels: labels[i]}, + }, + }) + require.NoError(t, err) + _, err = presence.UpsertKubeServiceV2(ctx, server) + require.NoError(t, err) + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.insertResources() + + // Look among the duplicated resource by label + resp, err := presence.ListResources(ctx, proto.ListResourcesRequest{ + ResourceType: tc.kind, + NeedTotalCount: true, + Limit: 5, + SearchKeywords: []string{"dev"}, + }) + require.NoError(t, err) + require.Len(t, resp.Resources, 1) + require.Equal(t, 1, resp.TotalCount) + require.Equal(t, map[string]string{"env": "dev"}, resp.Resources[0].GetAllLabels()) + }) + } +} diff --git a/lib/services/matchers.go b/lib/services/matchers.go index 4614a6815eac7..2607d5adfed40 100644 --- a/lib/services/matchers.go +++ b/lib/services/matchers.go @@ -58,24 +58,37 @@ func MatchResourceLabels(matchers []ResourceMatcher, resource types.ResourceWith return false } +// ResourceSeenKey is used as a key for a map that keeps track +// of unique resource names and address. Currently "addr" +// only applies to resource Application. +type ResourceSeenKey struct{ name, addr string } + // MatchResourceByFilters returns true if all filter values given matched against the resource. -// For resource KubeService, b/c of its 1-N relationhip with service-clusters, +// +// If no filters were provided, we will treat that as a match. +// +// If a `seenMap` is provided, this will be treated as a request to filter out duplicate matches. +// The map will be modified in place as it adds new keys. Seen keys will return match as false. 
+// +// Resource KubeService is handled differently b/c of its 1-N relationhip with service-clusters, // it filters out the non-matched clusters on the kube service and the kube service -// is modified in place with only the matched clusters. -func MatchResourceByFilters(resource types.ResourceWithLabels, filter MatchResourceFilter) (bool, error) { - if len(filter.Labels) == 0 && len(filter.SearchKeywords) == 0 && filter.PredicateExpression == "" { - return true, nil - } - +// is modified in place with only the matched clusters. Deduplication for resource `KubeService` +// is not provided but is provided for kind `KubernetesCluster`. +func MatchResourceByFilters(resource types.ResourceWithLabels, filter MatchResourceFilter, seenMap map[ResourceSeenKey]struct{}) (bool, error) { var specResource types.ResourceWithLabels // We assume when filtering for services like KubeService, AppServer, and DatabaseServer // the user is wanting to filter the contained resource ie. KubeClusters, Application, and Database. + resourceKey := ResourceSeenKey{} switch filter.ResourceKind { case types.KindNode, types.KindWindowsDesktop, types.KindKubernetesCluster: specResource = resource + resourceKey.name = specResource.GetName() case types.KindKubeService: + if seenMap != nil { + return false, trace.BadParameter("checking for duplicate matches for resource kind %q is not supported", filter.ResourceKind) + } return matchAndFilterKubeClusters(resource, filter) case types.KindAppServer: @@ -84,6 +97,9 @@ func MatchResourceByFilters(resource types.ResourceWithLabels, filter MatchResou return false, trace.BadParameter("expected types.AppServer, got %T", resource) } specResource = server.GetApp() + app := server.GetApp() + resourceKey.name = app.GetName() + resourceKey.addr = app.GetPublicAddr() case types.KindDatabaseServer: server, ok := resource.(types.DatabaseServer) @@ -91,12 +107,35 @@ func MatchResourceByFilters(resource types.ResourceWithLabels, filter MatchResou return false, trace.BadParameter("expected types.DatabaseServer, got %T", resource) } specResource = server.GetDatabase() + resourceKey.name = specResource.GetName() default: return false, trace.NotImplemented("filtering for resource kind %q not supported", filter.ResourceKind) } - return matchResourceByFilters(specResource, filter) + var match bool + + if len(filter.Labels) == 0 && len(filter.SearchKeywords) == 0 && filter.PredicateExpression == "" { + match = true + } + + if !match { + var err error + match, err = matchResourceByFilters(specResource, filter) + if err != nil { + return false, trace.Wrap(err) + } + } + + // Deduplicate matches. 
+ if match && seenMap != nil { + if _, exists := seenMap[resourceKey]; exists { + return false, nil + } + seenMap[resourceKey] = struct{}{} + } + + return match, nil } func matchResourceByFilters(resource types.ResourceWithLabels, filter MatchResourceFilter) (bool, error) { @@ -132,6 +171,10 @@ func matchResourceByFilters(resource types.ResourceWithLabels, filter MatchResou // modified in place with only the matched clusters // 3) only returns true if the service contained any matched cluster func matchAndFilterKubeClusters(resource types.ResourceWithLabels, filter MatchResourceFilter) (bool, error) { + if len(filter.Labels) == 0 && len(filter.SearchKeywords) == 0 && filter.PredicateExpression == "" { + return true, nil + } + server, ok := resource.(types.Server) if !ok { return false, trace.BadParameter("expected types.Server, got %T", resource) diff --git a/lib/services/matchers_test.go b/lib/services/matchers_test.go index 1e6edf23f43d9..60e04d7c2560b 100644 --- a/lib/services/matchers_test.go +++ b/lib/services/matchers_test.go @@ -375,9 +375,13 @@ func TestMatchResourceByFilters(t *testing.T) { resource func() types.ResourceWithLabels }{ { - name: "empty filter", - resource: func() types.ResourceWithLabels { return nil }, - filters: MatchResourceFilter{}, + name: "no filter should return true", + resource: func() types.ResourceWithLabels { + server, err := types.NewServer("foo", types.KindNode, types.ServerSpecV2{}) + require.NoError(t, err) + return server + }, + filters: MatchResourceFilter{ResourceKind: types.KindNode}, }, { name: "unsupported resource kind", @@ -497,7 +501,7 @@ func TestMatchResourceByFilters(t *testing.T) { t.Parallel() resource := tc.resource() - match, err := MatchResourceByFilters(resource, tc.filters) + match, err := MatchResourceByFilters(resource, tc.filters, nil) switch tc.wantNotImplErr { case true: diff --git a/lib/web/apps.go b/lib/web/apps.go index 3a77d15dbf75b..9ca45cfd6967d 100644 --- a/lib/web/apps.go +++ b/lib/web/apps.go @@ -74,7 +74,7 @@ func (h *Handler) clusterAppsGet(w http.ResponseWriter, r *http.Request, p httpr LocalProxyDNSName: h.proxyDNSName(), AppClusterName: appClusterName, Identity: identity, - Apps: types.DeduplicateApps(apps), + Apps: apps, }), StartKey: resp.NextKey, TotalCount: resp.TotalCount, diff --git a/lib/web/resources.go b/lib/web/resources.go index 41f81bcfcbc5e..d495b6e77e90f 100644 --- a/lib/web/resources.go +++ b/lib/web/resources.go @@ -320,7 +320,7 @@ func listResources(clt resourcesAPIGetter, r *http.Request, resourceKind string) ResourceType: resourceKind, Limit: int32(limit), StartKey: startKey, - NeedTotalCount: startKey == "", + NeedTotalCount: true, SortBy: sortBy, PredicateExpression: values.Get("query"), SearchKeywords: client.ParseSearchKeywords(values.Get("search"), ' '), diff --git a/lib/web/resources_test.go b/lib/web/resources_test.go index 5fda6fa1c2acb..63fb8d706dd6d 100644 --- a/lib/web/resources_test.go +++ b/lib/web/resources_test.go @@ -363,6 +363,7 @@ func TestListResources(t *testing.T) { SearchKeywords: []string{"foo+bar", "baz", "foo,bar", "some phrase"}, PredicateExpression: `labels.env == "prod"`, SortBy: types.SortBy{Field: "foo", IsDesc: true}, + NeedTotalCount: true, }, }, { diff --git a/lib/web/servers.go b/lib/web/servers.go index 3e8c34d8825d3..b0dbd1938e351 100644 --- a/lib/web/servers.go +++ b/lib/web/servers.go @@ -75,7 +75,7 @@ func (h *Handler) clusterDatabasesGet(w http.ResponseWriter, r *http.Request, p } return listResourcesGetResponse{ - Items: 
ui.MakeDatabases(h.auth.clusterName, types.DeduplicateDatabases(databases)), + Items: ui.MakeDatabases(h.auth.clusterName, databases), StartKey: resp.NextKey, TotalCount: resp.TotalCount, }, nil @@ -97,7 +97,6 @@ func (h *Handler) clusterDesktopsGet(w http.ResponseWriter, r *http.Request, p h if err != nil { return nil, trace.Wrap(err) } - windowsDesktops = types.DeduplicateDesktops(windowsDesktops) return listResourcesGetResponse{ Items: ui.MakeDesktops(windowsDesktops),
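// Reviewer sketch, not part of this diff: the deduplication that FakePaginate now
// applies while filtering. A single seen-map is threaded through every
// MatchResourceByFilters call, so the first resource carrying a given (name, address)
// key matches and later duplicates are reported as non-matches (address is only
// populated for apps). TotalCount is then simply the length of the filtered slice,
// and the page is cut out of it with StartKey and Limit. The helper name and package
// below are illustrative; the services identifiers come from this change.
package example

import (
	"github.com/gravitational/teleport/api/types"
	"github.com/gravitational/teleport/lib/services"
	"github.com/gravitational/trace"
)

// filterAndDeduplicate returns the matching resources with duplicate matches removed.
func filterAndDeduplicate(resources []types.ResourceWithLabels, filter services.MatchResourceFilter) ([]types.ResourceWithLabels, error) {
	seen := make(map[services.ResourceSeenKey]struct{})
	var filtered []types.ResourceWithLabels
	for _, resource := range resources {
		match, err := services.MatchResourceByFilters(resource, filter, seen)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		if match {
			filtered = append(filtered, resource)
		}
	}
	return filtered, nil
}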