[7.17] Bump postcss to ^8 (#136303) (#154723)
[7.17] Bump postcss to ^8 (#136303) (#154723)
# Backport

This will backport the following commits from `main` to `7.17`:
- [Bump postcss to ^8 (#136303)](https://github.com/elastic/kibana/pull/136303)

<!--- Backport version: 8.9.7 -->

### Questions?
Please refer to the [Backport tool documentation](https://github.com/sqren/backport).
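
(A backport PR like this one is typically generated by that CLI rather than written by hand; the invocation is roughly `backport --pr 136303 --branch 7.17`, though the exact flags should be checked against the tool's README.)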

<!--BACKPORT [{"author":{"name":"Jonathan Budzenski","email":"[email protected]"},"sourceCommit":{"committedDate":"2022-07-19T19:06:20Z","message":"Bump postcss to ^8 (#136303)\n\n* Bump postcss to ^8\r\n\r\n* fixes\r\n\r\n* fix config path\r\n\r\n* fix path\r\n\r\n* cleanup","sha":"88d64408c93cbb7ff2e0ae5942d05577a71d1fa4","branchLabelMapping":{"^v8.4.0$":"main","^v(\\d+).(\\d+).\\d+$":"$1.$2"}},"sourcePullRequest":{"labels":["Team:Operations","release_note:skip","backport:skip","v8.4.0","ci:cloud-deploy"],"number":136303,"url":"https://github.com/elastic/kibana/pull/136303","mergeCommit":{"message":"Bump postcss to ^8 (#136303)\n\n* Bump postcss to ^8\r\n\r\n* fixes\r\n\r\n* fix config path\r\n\r\n* fix path\r\n\r\n* cleanup","sha":"88d64408c93cbb7ff2e0ae5942d05577a71d1fa4"}},"sourceBranch":"main","suggestedTargetBranches":[],"targetPullRequestStates":[{"branch":"main","label":"v8.4.0","labelRegex":"^v8.4.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/136303","number":136303,"mergeCommit":{"message":"Bump postcss to ^8 (#136303)\n\n* Bump postcss to ^8\r\n\r\n* fixes\r\n\r\n* fix config path\r\n\r\n* fix path\r\n\r\n* cleanup","sha":"88d64408c93cbb7ff2e0ae5942d05577a71d1fa4"}}]}]
BACKPORT-->

---------

Co-authored-by: Jonathan Budzenski <[email protected]>
legrego and jbudz authored Apr 12, 2023
1 parent 4f76717 commit 8bb030c
Showing 55 changed files with 2,180 additions and 2,112 deletions.
16 changes: 9 additions & 7 deletions package.json
@@ -648,7 +648,7 @@
     "antlr4ts-cli": "^0.5.0-alpha.3",
     "apidoc-markdown": "^7.2.4",
     "argsplit": "^1.0.5",
-    "autoprefixer": "^9.7.4",
+    "autoprefixer": "^10.4.7",
     "axe-core": "^4.0.2",
     "babel-jest": "^27.5.1",
     "babel-loader": "^8.2.2",
@@ -719,7 +719,7 @@
     "gulp-babel": "^8.0.0",
     "gulp-brotli": "^3.0.0",
     "gulp-gzip": "^1.4.2",
-    "gulp-postcss": "^8.0.0",
+    "gulp-postcss": "^9.0.1",
     "gulp-sourcemaps": "2.6.5",
     "gulp-terser": "^2.0.1",
     "gulp-zip": "^5.0.2",
@@ -775,11 +775,13 @@
     "pirates": "^4.0.1",
     "pixelmatch": "^5.3.0",
     "playwright": "^1.17.1",
-    "postcss": "^7.0.32",
-    "postcss-loader": "^3.0.0",
-    "postcss-prefix-selector": "^1.7.2",
-    "prettier": "^2.4.0",
-    "pretty-ms": "5.0.0",
+    "pngjs": "^3.4.0",
+    "postcss": "^8.4.14",
+    "postcss-loader": "^4.2.0",
+    "postcss-prefix-selector": "^1.16.0",
+    "prettier": "^2.7.1",
+    "pretty-format": "^27.5.1",
+    "pretty-ms": "^5.0.0",
     "proxy": "^1.0.2",
     "q": "^1.5.1",
     "react-test-renderer": "^16.12.0",
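
The headline change in this hunk is postcss ^7 → ^8: PostCSS 8 introduced a visitor-based plugin API and a new peer-dependency model, which is why the surrounding plugin packages (autoprefixer, gulp-postcss, postcss-loader, postcss-prefix-selector) move to their PostCSS 8-compatible majors in the same commit. As a rough sketch of what the PostCSS 8 plugin style looks like (the plugin below is invented for illustration and is not part of this PR):

```ts
import type { Plugin } from 'postcss';

// Hypothetical PostCSS 8 plugin, for illustration only.
function rewriteTomato(): Plugin {
  return {
    postcssPlugin: 'rewrite-tomato',
    // Visitor invoked for every declaration node in the CSS AST.
    Declaration(decl) {
      if (decl.prop === 'color' && decl.value === 'tomato') {
        decl.value = '#ff6347';
      }
    },
  };
}
// PostCSS 8 uses this flag to recognize plugin factories.
rewriteTomato.postcss = true;

export default rewriteTomato;
```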
4 changes: 2 additions & 2 deletions packages/kbn-optimizer/src/worker/webpack.config.ts
@@ -154,8 +154,8 @@ export function getWebpackConfig(bundle: Bundle, bundleRefs: BundleRefs, worker:
           loader: 'postcss-loader',
           options: {
             sourceMap: !worker.dist,
-            config: {
-              path: require.resolve('@kbn/optimizer/postcss.config.js'),
+            postcssOptions: {
+              config: require.resolve('@kbn/optimizer/postcss.config.js'),
             },
           },
         },
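
This hunk tracks the postcss-loader 3 → 4 option rename: from v4 on, all PostCSS-specific settings live under a single `postcssOptions` key, a string `config` path replaces the old `config.path` object, and `sourceMap` stays a top-level loader option. A minimal before/after sketch of the same shape outside Kibana (the config path is illustrative):

```ts
// postcss-loader 3.x: PostCSS settings sit directly on `options`.
const v3Rule = {
  loader: 'postcss-loader',
  options: {
    sourceMap: true,
    config: { path: require.resolve('./postcss.config.js') },
  },
};

// postcss-loader 4.x and later: everything PostCSS-specific nests under `postcssOptions`.
const v4Rule = {
  loader: 'postcss-loader',
  options: {
    sourceMap: true,
    postcssOptions: { config: require.resolve('./postcss.config.js') },
  },
};
```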
4 changes: 2 additions & 2 deletions packages/kbn-storybook/src/webpack.config.ts
@@ -93,8 +93,8 @@ export default function ({ config: storybookConfig }: { config: Configuration })
         {
           loader: 'postcss-loader',
           options: {
-            config: {
-              path: require.resolve('@kbn/optimizer/postcss.config.js'),
+            postcssOptions: {
+              config: require.resolve('@kbn/optimizer/postcss.config.js'),
             },
           },
         },
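
Both webpack configs resolve the same shared `@kbn/optimizer/postcss.config.js`, whose contents are not part of this diff. Assuming it simply wires up the autoprefixer dependency bumped above, a PostCSS 8-era config of that shape would look roughly like:

```js
// Hypothetical stand-in for @kbn/optimizer/postcss.config.js (not shown in this diff).
module.exports = {
  plugins: [require('autoprefixer')()],
};
```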
118 changes: 60 additions & 58 deletions src/core/server/saved_objects/migrationsv2/actions/create_index.ts
(This hunk only reflows the type annotation of `createIndexTask`; the task body is unchanged apart from a matching two-space dedent, so it is shown once below at its new indentation.)
@@ -55,72 +55,74 @@ export const createIndex = ({
   mappings,
   aliases = [],
 }: CreateIndexParams): TaskEither.TaskEither<RetryableEsClientError, 'create_index_succeeded'> => {
-  const createIndexTask: TaskEither.TaskEither<RetryableEsClientError, AcknowledgeResponse> =
-    () => {
+  const createIndexTask: TaskEither.TaskEither<
+    RetryableEsClientError,
+    AcknowledgeResponse
+  > = () => {
     const aliasesObject = aliasArrayToRecord(aliases);
 
     return client.indices
       .create(
         {
           index: indexName,
           // wait until all shards are available before creating the index
           // (since number_of_shards=1 this does not have any effect atm)
           wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE,
           // Wait up to 60s for the cluster state to update and all shards to be
           // started
           timeout: DEFAULT_TIMEOUT,
           body: {
             mappings,
             aliases: aliasesObject,
             settings: {
               index: {
                 // ES rule of thumb: shards should be several GB to 10's of GB, so
                 // Kibana is unlikely to cross that limit.
                 number_of_shards: 1,
                 auto_expand_replicas: INDEX_AUTO_EXPAND_REPLICAS,
                 // Set an explicit refresh interval so that we don't inherit the
                 // value from incorrectly configured index templates (not required
                 // after we adopt system indices)
                 refresh_interval: '1s',
                 // Bump priority so that recovery happens before newer indices
                 priority: 10,
               },
             },
           },
         },
         { maxRetries: 0 /** handle retry ourselves for now */ }
       )
       .then((res) => {
         /**
          * - acknowledged=false, we timed out before the cluster state was
          *   updated on all nodes with the newly created index, but it
          *   probably will be created sometime soon.
          * - shards_acknowledged=false, we timed out before all shards were
          *   started
          * - acknowledged=true, shards_acknowledged=true, index creation complete
          */
         return Either.right({
           acknowledged: Boolean(res.body.acknowledged),
           shardsAcknowledged: res.body.shards_acknowledged,
         });
       })
       .catch((error) => {
         if (error?.body?.error?.type === 'resource_already_exists_exception') {
           /**
            * If the target index already exists it means a previous create
            * operation had already been started. However, we can't be sure
            * that all shards were started so return shardsAcknowledged: false
            */
           return Either.right({
             acknowledged: true,
             shardsAcknowledged: false,
           });
         } else {
           throw error;
         }
       })
       .catch(catchRetryableEsClientErrors);
   };
 
   return pipe(
     createIndexTask,
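
If the fp-ts types in this file are unfamiliar: a `TaskEither.TaskEither<E, A>` is just a lazy `() => Promise<Either<E, A>>`, so `createIndexTask` does nothing until it is invoked, and failures travel in the `Left` channel instead of as promise rejections. A self-contained sketch of consuming such a task (the demo task and messages are illustrative, not Kibana code):

```ts
import * as Either from 'fp-ts/lib/Either';
import * as TaskEither from 'fp-ts/lib/TaskEither';

// Stand-in for a task like createIndexTask: a thunk returning Promise<Either<E, A>>.
const demoTask: TaskEither.TaskEither<string, 'create_index_succeeded'> =
  TaskEither.right('create_index_succeeded');

async function run(): Promise<void> {
  const result = await demoTask(); // nothing ran until this call
  if (Either.isRight(result)) {
    console.log('created:', result.right);
  } else {
    console.error('retryable error:', result.left);
  }
}

void run();
```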
src/core/server/saved_objects/migrationsv2/actions/update_and_pickup_mappings.ts
(As above, this hunk only reflows the type annotation of `putMappingTask`; the body is unchanged apart from the matching dedent.)
@@ -41,33 +41,35 @@ export const updateAndPickupMappings = ({
   RetryableEsClientError,
   UpdateAndPickupMappingsResponse
 > => {
-  const putMappingTask: TaskEither.TaskEither<RetryableEsClientError, 'update_mappings_succeeded'> =
-    () => {
+  const putMappingTask: TaskEither.TaskEither<
+    RetryableEsClientError,
+    'update_mappings_succeeded'
+  > = () => {
     return client.indices
       .putMapping({
         index,
         timeout: DEFAULT_TIMEOUT,
         body: mappings,
       })
       .then((res) => {
         // Ignore `acknowledged: false`. When the coordinating node accepts
         // the new cluster state update but not all nodes have applied the
         // update within the timeout `acknowledged` will be false. However,
         // retrying this update will always immediately result in `acknowledged:
         // true` even if there are still nodes which are falling behind with
         // cluster state updates.
         // For updateAndPickupMappings this means that there is the potential
         // that some existing document's fields won't be picked up if the node
         // on which the Kibana shard is running has fallen behind with cluster
         // state updates and the mapping update wasn't applied before we run
         // `pickupUpdatedMappings`. ES tries to limit this risk by blocking
         // index operations (including update_by_query used by
         // updateAndPickupMappings) if there are pending mappings changes. But
         // not all mapping changes will prevent this.
         return Either.right('update_mappings_succeeded' as const);
       })
       .catch(catchRetryableEsClientErrors);
   };
 
   return pipe(
     putMappingTask,
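
The `pipe(putMappingTask, ...)` continuation is truncated above. As a generic sketch of how such migration steps compose with fp-ts (the chained pickup task and its result type are invented for illustration; this is not the file's actual pipeline):

```ts
import { pipe } from 'fp-ts/lib/function';
import * as TaskEither from 'fp-ts/lib/TaskEither';

// Illustrative composition: run a follow-up task only if the mapping
// update succeeded; any Left short-circuits the rest of the pipeline.
const updateThenPickup = (
  putMappingTask: TaskEither.TaskEither<Error, 'update_mappings_succeeded'>,
  pickupTask: TaskEither.TaskEither<Error, { taskId: string }>
): TaskEither.TaskEither<Error, { taskId: string }> =>
  pipe(
    putMappingTask,
    TaskEither.chain(() => pickupTask)
  );
```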
(The remaining changed files in this commit are not shown here.)
