Move make.sh to .buildkite (#2165)

This commit is contained in:
Josh Mock
2024-03-21 12:07:24 -05:00
committed by GitHub
parent 29a0e53978
commit d430aecdbd
12 changed files with 12 additions and 142 deletions

View File

@ -2,7 +2,7 @@
# #
# Shared cleanup routines between different steps # Shared cleanup routines between different steps
# #
# Please source .ci/functions/imports.sh as a whole not just this file # Please source .buildkite/functions/imports.sh as a whole not just this file
# #
# Version 1.0.0 # Version 1.0.0
# - Initial version after refactor # - Initial version after refactor

View File

@ -2,7 +2,7 @@
# #
# Exposes a routine scripts can call to wait for a container if that container set up a health command # Exposes a routine scripts can call to wait for a container if that container set up a health command
# #
# Please source .ci/functions/imports.sh as a whole not just this file # Please source .buildkite/functions/imports.sh as a whole not just this file
# #
# Version 1.0.1 # Version 1.0.1
# - Initial version after refactor # - Initial version after refactor

View File

@ -3,7 +3,7 @@
# #
# Build entry script for elasticsearch-js # Build entry script for elasticsearch-js
# #
# Must be called: ./.ci/make.sh <target> <params> # Must be called: ./.buildkite/make.sh <target> <params>
# #
# Version: 1.1.0 # Version: 1.1.0
# #
@ -34,8 +34,8 @@ STACK_VERSION=$VERSION
set -euo pipefail set -euo pipefail
product="elastic/elasticsearch-js" product="elastic/elasticsearch-js"
output_folder=".ci/output" output_folder=".buildkite/output"
codegen_folder=".ci/output" codegen_folder=".buildkite/output"
OUTPUT_DIR="$repo/${output_folder}" OUTPUT_DIR="$repo/${output_folder}"
NODE_JS_VERSION=18 NODE_JS_VERSION=18
WORKFLOW=${WORKFLOW-staging} WORKFLOW=${WORKFLOW-staging}
@ -131,7 +131,7 @@ esac
echo -e "\033[34;1mINFO: building $product container\033[0m" echo -e "\033[34;1mINFO: building $product container\033[0m"
docker build \ docker build \
--file .ci/Dockerfile \ --file .buildkite/Dockerfile-make \
--tag "$product" \ --tag "$product" \
--build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \ --build-arg NODE_JS_VERSION="$NODE_JS_VERSION" \
--build-arg "BUILDER_UID=$(id -u)" \ --build-arg "BUILDER_UID=$(id -u)" \
@ -156,7 +156,7 @@ if [[ -z "${BUILDKITE+x}" ]] && [[ -z "${CI+x}" ]] && [[ -z "${GITHUB_ACTIONS+x}
--rm \ --rm \
$product \ $product \
/bin/bash -c "mkdir -p /usr/src/elastic-client-generator-js/output && \ /bin/bash -c "mkdir -p /usr/src/elastic-client-generator-js/output && \
node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}"
else else
echo -e "\033[34;1mINFO: Running in CI mode" echo -e "\033[34;1mINFO: Running in CI mode"
docker run \ docker run \
@ -171,7 +171,7 @@ else
git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \ git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-js.git && \
mkdir -p /usr/src/elastic-client-generator-js/output && \ mkdir -p /usr/src/elastic-client-generator-js/output && \
cd /usr/src/elasticsearch-js && \ cd /usr/src/elasticsearch-js && \
node .ci/make.mjs --task $TASK ${TASK_ARGS[*]}" node .buildkite/make.mjs --task $TASK ${TASK_ARGS[*]}"
fi fi
# ------------------------------------------------------- # # ------------------------------------------------------- #
@ -179,7 +179,7 @@ fi
# ------------------------------------------------------- # # ------------------------------------------------------- #
if [[ "$CMD" == "assemble" ]]; then if [[ "$CMD" == "assemble" ]]; then
if compgen -G ".ci/output/*" > /dev/null; then if compgen -G ".buildkite/output/*" > /dev/null; then
echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m" echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m"
else else
echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m" echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m"

View File

@ -9,7 +9,6 @@
"\\.md$", "\\.md$",
"\\.asciidoc$", "\\.asciidoc$",
"^docs\\/", "^docs\\/",
"^\\.ci\\/",
"^scripts\\/", "^scripts\\/",
"^catalog-info\\.yaml$", "^catalog-info\\.yaml$",
"^test\\/unit\\/", "^test\\/unit\\/",

View File

@ -17,7 +17,7 @@ jobs:
with: with:
filters: | filters: |
src-only: src-only:
- '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.ci,.buildkite,scripts}/**/*|catalog-info.yaml)' - '!(**/*.{md,asciidoc,txt}|*.{md,asciidoc,txt}|{docs,.buildkite,scripts}/**/*|catalog-info.yaml)'
- '.github/workflows/**' - '.github/workflows/**'
test: test:

View File

@ -64,7 +64,6 @@ test
scripts scripts
# ci configuration # ci configuration
.ci
.travis.yml .travis.yml
.buildkite .buildkite
certs certs

View File

@ -1,11 +0,0 @@
.PHONY: integration-setup
integration-setup: integration-cleanup
DETACH=true .ci/run-elasticsearch.sh
.PHONY: integration-cleanup
integration-cleanup:
docker container rm --force --volumes instance || true
.PHONY: integration
integration: integration-setup
npm run test:integration

View File

@ -1,77 +0,0 @@
#!/bin/bash
# Images are cached locally, it may be needed
# to delete an old image and download again
# the latest snapshot.
repo=$(pwd)
testnodecrt="/.ci/certs/testnode.crt"
testnodekey="/.ci/certs/testnode.key"
cacrt="/.ci/certs/ca.crt"
# pass `--clean` to remove the old snapshot
if [ "$1" == "--clean" ]; then
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
fi
# Create the 'elastic' network if it doesn't exist
exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
if [ "$1" == "--detach" ]; then
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "action.destructive_requires_name=false" \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-e "ELASTIC_PASSWORD=changeme" \
-e "xpack.security.enabled=true" \
-e "xpack.license.self_generated.type=trial" \
-e "xpack.security.http.ssl.enabled=true" \
-e "xpack.security.http.ssl.verification_mode=certificate" \
-e "xpack.security.http.ssl.key=certs/testnode.key" \
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-e "xpack.security.transport.ssl.enabled=true" \
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-p 9200:9200 \
--detach \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT
else
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "action.destructive_requires_name=false" \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-e "ELASTIC_PASSWORD=changeme" \
-e "xpack.security.enabled=true" \
-e "xpack.license.self_generated.type=trial" \
-e "xpack.security.http.ssl.enabled=true" \
-e "xpack.security.http.ssl.verification_mode=certificate" \
-e "xpack.security.http.ssl.key=certs/testnode.key" \
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-e "xpack.security.transport.ssl.enabled=true" \
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-p 9200:9200 \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT
fi

View File

@ -1,40 +0,0 @@
#!/bin/bash
# Images are cached locally, it may be needed
# to delete an old image and download again
# the latest snapshot.
# pass `--clean` to remove the old snapshot
if [ "$1" == "--clean" ]; then
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
fi
# Create the 'elastic' network if it doesn't exist
exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
if [ "$1" == "--detach" ]; then
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "action.destructive_requires_name=false" \
-p 9200:9200 \
--detach \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT
else
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "action.destructive_requires_name=false" \
-p 9200:9200 \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT
fi

View File

@ -23,7 +23,7 @@ The specification does not allow the test to be run in parallel, so it might tak
### Running locally ### Running locally
If you want to run the integration tests on your development machine, you must have an Elasticsearch instance running first. If you want to run the integration tests on your development machine, you must have an Elasticsearch instance running first.
A local instance can be spun up in a Docker container by running the [`.ci/run-elasticsearch.sh`](/.ci/run-elasticsearch.sh) script. A local instance can be spun up in a Docker container by running the [`.buildkite/run-elasticsearch.sh`](/.buildkite/run-elasticsearch.sh) script.
This is the same script CI jobs use to run Elasticsearch for integration tests, so your results should be relatively consistent. This is the same script CI jobs use to run Elasticsearch for integration tests, so your results should be relatively consistent.
To simplify the process of starting a container, testing, and cleaning up the container, you can run the `make integration` target: To simplify the process of starting a container, testing, and cleaning up the container, you can run the `make integration` target:
@ -35,7 +35,7 @@ export TEST_SUITE=free # can be `free` or `platinum`
make integration make integration
``` ```
If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .ci/run-elasticsearch.sh` manually to read the startup logs. If Elasticsearch doesn't come up, run `make integration-cleanup` and then `DETACH=false .buildkite/run-elasticsearch.sh` manually to read the startup logs.
If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144; echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf` to update the setting permanently. If you get an error about `vm.max_map_count` being too low, run `sudo sysctl -w vm.max_map_count=262144` to update the setting until the next reboot, or `sudo sysctl -w vm.max_map_count=262144; echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf` to update the setting permanently.