Update integration test (#1493)
Committed by delvedor
Parent: 28d8ff799e
Commit: 9baa42ac1b
@@ -7,7 +7,7 @@
# Export the TEST_SUITE variable, e.g. 'free' or 'platinum'; defaults to 'free'.
# Export the NUMBER_OF_NODES variable to start more than 1 node
# Version 1.2.0
# Version 1.4.0
# - Initial version of the run-elasticsearch.sh script
# - Deleting the volume should not depend on the container still running
# - Fixed `ES_JAVA_OPTS` config
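For context on the TEST_SUITE and NUMBER_OF_NODES comments above, a typical invocation of the script looks roughly like this (the path and the values are illustrative only, not part of this change):

    export TEST_SUITE=platinum      # or 'free', the default
    export NUMBER_OF_NODES=3        # start a 3-node cluster instead of a single node
    ./run-elasticsearch.sh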
@@ -15,6 +15,9 @@
# - Refactored into functions and imports
# - Support NUMBER_OF_NODES
# - Added 5 retries on docker pull for fixing transient network errors
# - Added flags to make local CCR configurations work
# - Added action.destructive_requires_name=false as the default will be true in v8
# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing
script_path=$(dirname $(realpath -s $0))
source $script_path/functions/imports.sh
@@ -37,6 +40,8 @@ environment=($(cat <<-END
--env node.attr.testattr=test
--env path.repo=/tmp
--env repositories.url.allowed_urls=http://snapshot.test*
--env action.destructive_requires_name=false
--env ingest.geoip.downloader.enabled=false
END
))
if [[ "$TEST_SUITE" == "platinum" ]]; then
@@ -50,6 +55,7 @@ if [[ "$TEST_SUITE" == "platinum" ]]; then
--env xpack.security.http.ssl.certificate=certs/testnode.crt
--env xpack.security.http.ssl.certificate_authorities=certs/ca.crt
--env xpack.security.transport.ssl.enabled=true
--env xpack.security.transport.ssl.verification_mode=certificate
--env xpack.security.transport.ssl.key=certs/testnode.key
--env xpack.security.transport.ssl.certificate=certs/testnode.crt
--env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt
@@ -104,7 +110,7 @@ END
docker run \
--name "$node_name" \
--network "$network_name" \
--env "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
--env "ES_JAVA_OPTS=-Xms1g -Xmx1g -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions" \
"${environment[@]}" \
"${volumes[@]}" \
--publish "$http_port":9200 \
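In the docker run hunk above, the plain ES_JAVA_OPTS line is replaced by the one carrying -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions. -ea and -da are standard JVM switches: -ea enables assertions, while -da:<class> disables them for a single class, which is presumably what keeps the local CCR setup from tripping the FollowingEngine assertions. A minimal sketch of the flag syntax, runnable on any JVM:

    # -da:<class> disables assertions for that one class; -version only proves the JVM accepts the flag
    java -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions -version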
@@ -47,6 +47,8 @@ const freeSkips = {
// TODO: remove this once 'arbitrary_key' is implemented
// https://github.com/elastic/elasticsearch/pull/41492
'indices.split/30_copy_settings.yml': ['*'],
'indices.stats/50_disk_usage.yml': ['Disk usage stats'],
'indices.stats/60_field_usage.yml': ['Field usage stats'],
// skipping because we are booting ES with `discovery.type=single-node`
// and this test will fail because of this configuration
'nodes.stats/30_discovery.yml': ['*'],
@@ -55,6 +57,7 @@ const freeSkips = {
'search.aggregation/240_max_buckets.yml': ['*']
}
const platinumBlackList = {
'analytics/histogram.yml': ['Histogram requires values in increasing order'],
// these two test cases are broken; we should
// return to them in the future.
'analytics/top_metrics.yml': [
@@ -80,6 +80,18 @@ function build (opts = {}) {
// remove 'x_pack_rest_user', used in some xpack test
await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] })
const { body: searchableSnapshotIndices } = await client.cluster.state({
metric: 'metadata',
filter_path: 'metadata.indices.*.settings.index.store.snapshot'
})
if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) {
await helper.runInParallel(
client, 'indices.delete',
Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })),
{ ignore: [404] }
)
}
}
// clean snapshots
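The new block above pulls only the index settings that mark an index as a searchable-snapshot mount out of the cluster state, then deletes those indices. The same lookup expressed as a plain REST call, useful when checking the cleanup by hand (host and port are assumptions):

    curl -s 'http://localhost:9200/_cluster/state/metadata?filter_path=metadata.indices.*.settings.index.store.snapshot'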
@@ -164,6 +176,13 @@ function build (opts = {}) {
)
}
const { body: shutdownNodes } = await client.shutdown.getNode()
if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) {
for (const node of shutdownNodes.nodes) {
await client.shutdown.deleteNode({ node_id: node.node_id })
}
}
// wait for pending task before resolving the promise
await sleep(100)
while (true) {
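The shutdown cleanup above only runs when the response does not carry the _nodes/cluster_name wrapper shape, i.e. when it is the node-shutdown payload with a nodes array. A rough REST equivalent of that cleanup, with the node id as a placeholder:

    # list registered node shutdowns, then clear one by node id
    curl -s 'http://localhost:9200/_nodes/shutdown'
    curl -s -X DELETE 'http://localhost:9200/_nodes/NODE_ID/shutdown'   # NODE_ID is a placeholder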