Skip to content

Commit

Permalink
[7.x] Telemetry plugins esclient migration (#87356) (#87570)
Browse files Browse the repository at this point in the history
  • Loading branch information
TinaHeiligers authored Jan 6, 2021
1 parent 8fd8556 commit 3e172d0
Show file tree
Hide file tree
Showing 17 changed files with 34 additions and 74 deletions.
1 change: 0 additions & 1 deletion src/plugins/data/server/server.api.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ import { IUiSettingsClient } from 'src/core/server';
import { IUiSettingsClient as IUiSettingsClient_3 } from 'kibana/server';
import { KibanaRequest } from 'kibana/server';
import { KibanaRequest as KibanaRequest_2 } from 'src/core/server';
import { LegacyAPICaller } from 'src/core/server';
import { Logger } from 'src/core/server';
import { Logger as Logger_2 } from 'kibana/server';
import { LoggerFactory } from '@kbn/logging';
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/telemetry/server/fetcher.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ import {
SavedObjectsClientContract,
SavedObjectsClient,
CoreStart,
ILegacyCustomClusterClient,
ICustomClusterClient,
} from '../../../core/server';
import {
getTelemetryOptIn,
Expand Down Expand Up @@ -65,7 +65,7 @@ export class FetcherTask {
private isSending = false;
private internalRepository?: SavedObjectsClientContract;
private telemetryCollectionManager?: TelemetryCollectionManagerPluginStart;
private elasticsearchClient?: ILegacyCustomClusterClient;
private elasticsearchClient?: ICustomClusterClient;

constructor(initializerContext: PluginInitializerContext<TelemetryConfigType>) {
this.config$ = initializerContext.config.create();
Expand All @@ -79,7 +79,7 @@ export class FetcherTask {
) {
this.internalRepository = new SavedObjectsClient(savedObjects.createInternalRepository());
this.telemetryCollectionManager = telemetryCollectionManager;
this.elasticsearchClient = elasticsearch.legacy.createClient('telemetry-fetcher');
this.elasticsearchClient = elasticsearch.createClient('telemetry-fetcher');

this.intervalId = timer(this.initialCheckDelayMs, this.checkIntervalMs).subscribe(() =>
this.sendIfDue()
Expand Down
2 changes: 1 addition & 1 deletion src/plugins/telemetry/server/plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ export class TelemetryPlugin implements Plugin<TelemetryPluginSetup, TelemetryPl
}

public setup(
{ elasticsearch, http, savedObjects }: CoreSetup,
{ http, savedObjects }: CoreSetup,
{ usageCollection, telemetryCollectionManager }: TelemetryPluginsDepsSetup
): TelemetryPluginSetup {
const currentKibanaVersion = this.currentKibanaVersion;
Expand Down
15 changes: 2 additions & 13 deletions src/plugins/telemetry/server/telemetry_collection/get_kibana.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,7 @@

import { omit } from 'lodash';
import { UsageCollectionSetup } from 'src/plugins/usage_collection/server';
import {
ISavedObjectsRepository,
KibanaRequest,
LegacyAPICaller,
SavedObjectsClientContract,
} from 'kibana/server';
import { ISavedObjectsRepository, KibanaRequest, SavedObjectsClientContract } from 'kibana/server';
import { StatsCollectionContext } from 'src/plugins/telemetry_collection_manager/server';
import { ElasticsearchClient } from 'src/core/server';

Expand Down Expand Up @@ -88,16 +83,10 @@ export function handleKibanaStats(

export async function getKibana(
usageCollection: UsageCollectionSetup,
callWithInternalUser: LegacyAPICaller,
asInternalUser: ElasticsearchClient,
soClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined // intentionally `| undefined` to enforce providing the parameter
): Promise<KibanaUsageStats> {
const usage = await usageCollection.bulkFetch(
callWithInternalUser,
asInternalUser,
soClient,
kibanaRequest
);
const usage = await usageCollection.bulkFetch(asInternalUser, soClient, kibanaRequest);
return usageCollection.toObject(usage);
}
Original file line number Diff line number Diff line change
Expand Up @@ -71,15 +71,15 @@ export const getLocalStats: StatsGetter<TelemetryLocalStats> = async (
config,
context
) => {
const { callCluster, usageCollection, esClient, soClient, kibanaRequest } = config;
const { usageCollection, esClient, soClient, kibanaRequest } = config;

return await Promise.all(
clustersDetails.map(async (clustersDetail) => {
const [clusterInfo, clusterStats, nodesUsage, kibana, dataTelemetry] = await Promise.all([
getClusterInfo(esClient), // cluster info
getClusterStats(esClient), // cluster stats (not to be confused with cluster _state_)
getNodesUsage(esClient), // nodes_usage info
getKibana(usageCollection, callCluster, esClient, soClient, kibanaRequest),
getKibana(usageCollection, esClient, soClient, kibanaRequest),
getDataTelemetry(esClient),
]);
return handleLocalStats(
Expand Down
10 changes: 2 additions & 8 deletions src/plugins/telemetry_collection_manager/server/plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ import {
Logger,
IClusterClient,
SavedObjectsServiceStart,
ILegacyClusterClient,
} from 'src/core/server';

import {
Expand All @@ -53,7 +52,6 @@ export class TelemetryCollectionManagerPlugin
private collectionStrategy: CollectionStrategy<any> | undefined;
private usageGetterMethodPriority = -1;
private usageCollection?: UsageCollectionSetup;
private legacyElasticsearchClient?: ILegacyClusterClient;
private elasticsearchClient?: IClusterClient;
private savedObjectsService?: SavedObjectsServiceStart;
private readonly isDistributable: boolean;
Expand All @@ -77,7 +75,6 @@ export class TelemetryCollectionManagerPlugin
}

public start(core: CoreStart) {
this.legacyElasticsearchClient = core.elasticsearch.legacy.client; // TODO: Remove when all the collectors have migrated
this.elasticsearchClient = core.elasticsearch.client;
this.savedObjectsService = core.savedObjects;

Expand Down Expand Up @@ -129,9 +126,6 @@ export class TelemetryCollectionManagerPlugin
config: StatsGetterConfig,
usageCollection: UsageCollectionSetup
): StatsCollectionConfig | undefined {
const callCluster = config.unencrypted
? this.legacyElasticsearchClient?.asScoped(config.request).callAsCurrentUser
: this.legacyElasticsearchClient?.callAsInternalUser;
// Scope the new elasticsearch Client appropriately and pass to the stats collection config
const esClient = config.unencrypted
? this.elasticsearchClient?.asScoped(config.request).asCurrentUser
Expand All @@ -143,8 +137,8 @@ export class TelemetryCollectionManagerPlugin
// Provide the kibanaRequest so opted-in plugins can scope their custom clients only if the request is not encrypted
const kibanaRequest = config.unencrypted ? config.request : void 0;

if (callCluster && esClient && soClient) {
return { callCluster, usageCollection, esClient, soClient, kibanaRequest };
if (esClient && soClient) {
return { usageCollection, esClient, soClient, kibanaRequest };
}
}

Expand Down
2 changes: 0 additions & 2 deletions src/plugins/telemetry_collection_manager/server/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@
*/

import {
LegacyAPICaller,
ElasticsearchClient,
Logger,
KibanaRequest,
Expand Down Expand Up @@ -68,7 +67,6 @@ export interface ClusterDetails {

export interface StatsCollectionConfig {
usageCollection: UsageCollectionSetup;
callCluster: LegacyAPICaller;
esClient: ElasticsearchClient;
soClient: SavedObjectsClientContract | ISavedObjectsRepository;
kibanaRequest: KibanaRequest | undefined; // intentionally `| undefined` to enforce providing the parameter
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/usage_collection/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -95,10 +95,10 @@ Some background:

- `isReady` (added in v7.2.0 and v6.8.4) is a way for a usage collector to announce that some async process must finish first before it can return data in the `fetch` method (e.g. a client needs to be initialized, or the task manager needs to run a task first). If any collector reports that it is not ready when we call its `fetch` method, we reset a flag to try again and, after a set amount of time, collect data from those collectors that are ready and skip any that are not. This means that if a collector returns `true` for `isReady` and it actually isn't ready to return data, there won't be telemetry data from that collector in that telemetry report (usually once per day). You should consider what it means if your collector doesn't return data in the first few documents when Kibana starts, or if we should wait for any other reason (e.g. the task manager needs to run your task first). If you need to tell telemetry collection to wait, you should implement this function with custom logic. If your `fetch` method can run without the need of any previous dependencies, then you can return true for `isReady` as shown in the example below.

- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests the example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data that user should not have access to (i.e.: if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exists in it). In this case, the `fetch` method receives the clients `callCluster`, `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please, mind it might have lower-level access than the default super-admin `elastic` test user.
- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests the example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data that the user should not have access to (i.e.: if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exist in them). In this case, the `fetch` method receives the clients `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please mind that it might have lower-level access than the default super-admin `elastic` test user.
In some scenarios, your collector might need to maintain its own client. An example of that is the `monitoring` plugin, that maintains a connection to the Remote Monitoring Cluster to push its monitoring data. If that's the case, your plugin can opt-in to receive the additional `kibanaRequest` parameter by adding `extendFetchContext.kibanaRequest: true` to the collector's config: it will be appended to the context of the `fetch` method only if the request needs to be scoped to a user other than Kibana Internal, so beware that your collector will need to work for both scenarios (especially for the scenario when `kibanaRequest` is missing).

Note: there will be many cases where you won't need to use the `callCluster`, `esClient` or `soClient` function that gets passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.
Note: there will be many cases where you won't need to use the `esClient` or `soClient` clients that get passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.

In the case of using a custom SavedObjects client, it is up to the plugin to initialize the client to save the data and it is strongly recommended to scope that client to the `kibana_system` user.

Expand Down Expand Up @@ -302,7 +302,7 @@ New fields added to the telemetry payload currently mean that telemetry cluster

There are a few ways you can test that your usage collector is working properly.

1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the fetch methods of all the registered collectors, and add them to a stats object you can see in a browser or in curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above where `callCluster` wraps `callWithRequest`.
1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the fetch methods of all the registered collectors, and add them to a stats object you can see in a browser or in curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above, where the Elasticsearch client is scoped to the incoming request.
2. There is a dev script in x-pack that will give a sample of a payload of data that gets sent up to the telemetry cluster for the sending phase of telemetry. Collected data comes from:
- The `.monitoring-*` indices, when Monitoring is enabled. Monitoring enhances the sent payload of telemetry by producing usage data potentially of multiple clusters that exist in the monitoring data. Monitoring data is time-based, and the time frame of collection is the last 15 minutes.
- Live-pulled from ES API endpoints. This will get just real-time stats without context of historical data.
Expand Down
5 changes: 0 additions & 5 deletions src/plugins/usage_collection/server/collector/collector.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@

import {
Logger,
LegacyAPICaller,
ElasticsearchClient,
ISavedObjectsRepository,
SavedObjectsClientContract,
Expand Down Expand Up @@ -54,10 +53,6 @@ export type MakeSchemaFrom<Base> = {
* @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster.
*/
export type CollectorFetchContext<WithKibanaRequest extends boolean | undefined = false> = {
/**
* @deprecated Scoped Legacy Elasticsearch client: use esClient instead
*/
callCluster: LegacyAPICaller;
/**
* Request-scoped Elasticsearch client
* @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster (more info: {@link CollectorFetchContext})
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@ describe('CollectorSet', () => {
loggerSpies.debug.mockRestore();
loggerSpies.warn.mockRestore();
});
const mockCallCluster = jest.fn().mockResolvedValue({ passTest: 1000 });
const mockEsClient = elasticsearchServiceMock.createClusterClient().asInternalUser;
const mockSoClient = savedObjectsRepositoryMock.create();
const req = void 0; // No need to instantiate any KibanaRequest in these tests
Expand Down Expand Up @@ -83,18 +82,19 @@ describe('CollectorSet', () => {
});

it('should log debug status of fetching from the collector', async () => {
mockEsClient.get.mockResolvedValue({ passTest: 1000 } as any);
const collectors = new CollectorSet({ logger });
collectors.registerCollector(
new Collector(logger, {
type: 'MY_TEST_COLLECTOR',
fetch: (collectorFetchContext: any) => {
return collectorFetchContext.callCluster();
return collectorFetchContext.esClient.get();
},
isReady: () => true,
})
);

const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(loggerSpies.debug).toHaveBeenCalledTimes(1);
expect(loggerSpies.debug).toHaveBeenCalledWith(
'Fetching data from MY_TEST_COLLECTOR collector'
Expand All @@ -119,7 +119,7 @@ describe('CollectorSet', () => {

let result;
try {
result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
} catch (err) {
// Do nothing
}
Expand All @@ -137,7 +137,7 @@ describe('CollectorSet', () => {
})
);

const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(result).toStrictEqual([
{
type: 'MY_TEST_COLLECTOR',
Expand All @@ -155,7 +155,7 @@ describe('CollectorSet', () => {
} as any)
);

const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(result).toStrictEqual([
{
type: 'MY_TEST_COLLECTOR',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import { snakeCase } from 'lodash';
import {
Logger,
LegacyAPICaller,
ElasticsearchClient,
ISavedObjectsRepository,
SavedObjectsClientContract,
Expand Down Expand Up @@ -171,7 +170,6 @@ export class CollectorSet {
};

public bulkFetch = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
soClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined, // intentionally `| undefined` to enforce providing the parameter
Expand All @@ -182,7 +180,6 @@ export class CollectorSet {
this.logger.debug(`Fetching data from ${collector.type} collector`);
try {
const context = {
callCluster,
esClient,
soClient,
...(collector.extendFetchContext.kibanaRequest && { kibanaRequest }),
Expand Down Expand Up @@ -212,14 +209,12 @@ export class CollectorSet {
};

public bulkFetchUsage = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined // intentionally `| undefined` to enforce providing the parameter
) => {
const usageCollectors = this.getFilteredCollectorSet((c) => c instanceof UsageCollector);
return await this.bulkFetch(
callCluster,
esClient,
savedObjectsClient,
kibanaRequest,
Expand Down
12 changes: 2 additions & 10 deletions src/plugins/usage_collection/server/routes/stats/stats.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ import {
IRouter,
ISavedObjectsRepository,
KibanaRequest,
LegacyAPICaller,
MetricsServiceSetup,
SavedObjectsClientContract,
ServiceStatus,
Expand Down Expand Up @@ -66,17 +65,11 @@ export function registerStatsRoute({
overallStatus$: Observable<ServiceStatus>;
}) {
const getUsage = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest
): Promise<any> => {
const usage = await collectorSet.bulkFetchUsage(
callCluster,
esClient,
savedObjectsClient,
kibanaRequest
);
const usage = await collectorSet.bulkFetchUsage(esClient, savedObjectsClient, kibanaRequest);
return collectorSet.toObject(usage);
};

Expand Down Expand Up @@ -110,7 +103,6 @@ export function registerStatsRoute({

let extended;
if (isExtended) {
const callCluster = context.core.elasticsearch.legacy.client.callAsCurrentUser;
const { asCurrentUser } = context.core.elasticsearch.client;
const savedObjectsClient = context.core.savedObjects.client;

Expand All @@ -122,7 +114,7 @@ export function registerStatsRoute({
}

const usagePromise = shouldGetUsage
? getUsage(callCluster, asCurrentUser, savedObjectsClient, req)
? getUsage(asCurrentUser, savedObjectsClient, req)
: Promise.resolve({});
const [usage, clusterUuid] = await Promise.all([
usagePromise,
Expand Down
2 changes: 0 additions & 2 deletions src/plugins/usage_collection/server/usage_collection.mock.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ export const createUsageCollectionSetupMock = () => {

export function createCollectorFetchContextMock(): jest.Mocked<CollectorFetchContext<false>> {
const collectorFetchClientsMock: jest.Mocked<CollectorFetchContext<false>> = {
callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
soClient: savedObjectsRepositoryMock.create(),
};
Expand All @@ -61,7 +60,6 @@ export function createCollectorFetchContextWithKibanaMock(): jest.Mocked<
CollectorFetchContext<true>
> {
const collectorFetchClientsMock: jest.Mocked<CollectorFetchContext<true>> = {
callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
soClient: savedObjectsRepositoryMock.create(),
kibanaRequest: httpServerMock.createKibanaRequest(),
Expand Down
Loading

0 comments on commit 3e172d0

Please sign in to comment.