Compare commits
26 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7aebd6b980 | |
| | 5a0bcfd83f | |
| | e23cc725f6 | |
| | 59f7204183 | |
| | e4edfde4f7 | |
| | eec7439514 | |
| | 6610cf3638 | |
| | af4b4d4bb6 | |
| | 9313a08733 | |
| | 77f4cb6c02 | |
| | 6c13dc4f7b | |
| | 4143c4c759 | |
| | 0980a51cfe | |
| | e4da7c1dbd | |
| | d04e1a479c | |
| | c0cf2437fd | |
| | d798b2cd37 | |
| | 1927aa85f7 | |
| | 0716d3b4e2 | |
| | 6dd9d2737c | |
| | 4170fc702b | |
| | dfc0556c4b | |
| | dbe22f284a | |
| | 2615338008 | |
| | c6ff0dee17 | |
| | 00fef28230 | |
.ci/Jenkinsfile (vendored, new file, 185 lines)
@@ -0,0 +1,185 @@
#!/usr/bin/env groovy

@Library('apm@current') _

def NODE_JS_VERSIONS = [8,10,12]
def nodeJsVersion = NODE_JS_VERSIONS[randomNumber(min: 0, max:2)]

pipeline {
  agent {
    label 'docker && immutable'
  }

  environment {
    REPO = 'elasticsearch-js'
    BASE_DIR = "src/github.com/elastic/${env.REPO}"
    NODE_JS_DEFAULT_VERSION = "${nodeJsVersion}"
    NODE_JS_VERSIONS = "${NODE_JS_VERSIONS.join(',')}"
    HOME = "${env.WORKSPACE}"
    npm_config_cache = 'npm-cache'
  }

  options {
    timeout(time: 1, unit: 'HOURS')
    buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20', daysToKeepStr: '30'))
    timestamps()
    ansiColor('xterm')
    disableResume()
    durabilityHint('PERFORMANCE_OPTIMIZED')
  }

  triggers {
    issueCommentTrigger('(?i).*(?:jenkins\\W+)?run\\W+(?:the\\W+)?tests(?:\\W+please)?.*')
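    // For reference: PR comments such as "jenkins run the tests",
    // "run tests" or "run the tests please" all match this pattern.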
    // env.CHANGE_ID has a value when the build comes from a PR, which means
    // that we will have a daily cron job only for branches that don't have an active PR
    cron(env.CHANGE_ID ? '' : '@daily')
  }

  stages {
    stage('Checkout') {
      options { skipDefaultCheckout() }
      steps {
        deleteDir()
        gitCheckout(basedir: "${BASE_DIR}", githubNotifyFirstTimeContributor: false)
        stash allowEmpty: true, name: 'source', useDefaultExcludes: false
      }
    }

    stage('Install dependencies') {
      options { skipDefaultCheckout() }
      steps {
        deleteDir()
        unstash 'source'
        script {
          buildDockerImage(image: "node:${env.NODE_JS_DEFAULT_VERSION}-alpine").inside(){
            dir("${BASE_DIR}"){
              sh(label: 'System info', script: 'node --version; npm --version')
              sh(label: 'Install dependencies', script: 'npm install')
            }
          }
        }
        stash allowEmpty: true, name: 'source-dependencies', useDefaultExcludes: false
      }
    }

    stage('License check') {
      options { skipDefaultCheckout() }
      steps {
        withGithubNotify(context: 'License check') {
          deleteDir()
          unstash 'source-dependencies'
          script {
            buildDockerImage(image: "node:${env.NODE_JS_DEFAULT_VERSION}-alpine").inside(){
              dir("${BASE_DIR}"){
                sh(label: 'Check production dependencies licenses', script: 'npm run license-checker')
              }
            }
          }
        }
      }
    }

    stage('Linter') {
      options { skipDefaultCheckout() }
      steps {
        withGithubNotify(context: 'Linter') {
          deleteDir()
          unstash 'source-dependencies'
          script {
            buildDockerImage(image: "node:${env.NODE_JS_DEFAULT_VERSION}-alpine").inside(){
              dir("${BASE_DIR}"){
                sh(label: 'Lint code with standardjs', script: 'npm run lint')
              }
            }
          }
        }
      }
    }

    stage('Unit test') {
      failFast true
      options { skipDefaultCheckout() }
      steps {
        withGithubNotify(context: 'Unit test') {
          script {
            def versions = env.NODE_JS_VERSIONS.split(',')
            def parallelTasks = [:]
            versions.each{ version ->
              parallelTasks["Node.js v${version}"] = buildUnitTest(version: version)
            }
            parallel(parallelTasks)
          }
        }
      }
    }

    stage('Integration test') {
      options { skipDefaultCheckout() }
      environment {
        TEST_ES_SERVER = 'http://elasticsearch:9200'
      }
      steps {
        withGithubNotify(context: 'Integration test OSS') {
          deleteDir()
          unstash 'source-dependencies'
          dir("${BASE_DIR}"){
            // Sometimes the docker registry fails and has random timeouts
            // this block will retry starting Elasticsearch 3 times before failing.
            retry(3) {
              sleep randomNumber(min: 5, max: 10)
              sh(label: 'Start Elasticsearch', script: './scripts/es-docker.sh --detach')
            }
          }
          script {
            buildDockerImage(fromDockerfile: true).inside('--network=elastic'){
              dir("${BASE_DIR}"){
                sh(label: 'Integration test', script: 'npm run test:integration | tee test-integration.tap')
                sh(label: 'Generating test reporting', script: './node_modules/.bin/tap-mocha-reporter xunit < test-integration.tap > junit-integration.xml')
              }
            }
          }
          sh(label: 'Stop Elasticsearch', script: 'docker kill $(docker ps -q)')
          junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/**/junit-*.xml")
        }
      }
    }
  }
}

// Sometimes the docker registry fails and has random timeouts
// this function will retry pulling or building the Docker image 3 times before failing.
def buildDockerImage(args) {
  def image
  retry(3) {
    sleep randomNumber(min: 5, max: 10)
    if (args.fromDockerfile == true) {
      image = docker.build('nodejs-image', "--build-arg NODE_JS_VERSION=${env.NODE_JS_DEFAULT_VERSION} ${BASE_DIR}/.ci/docker")
    } else {
      image = docker.image(args.image)
      // make sure we have the latest available from Docker Hub
      image.pull()
    }
  }
  return image
}
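
// Returns a closure to be used as one branch of parallel(): each branch
// unstashes the source, installs the dependencies and runs the unit,
// behavior and types test suites inside a node:<version>-alpine container,
// then collects the JUnit reports.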
def buildUnitTest(args) {
  return {
    node('docker && immutable') {
      deleteDir()
      unstash 'source'
      script {
        buildDockerImage(image: "node:${args.version}-alpine").inside(){
          dir("${BASE_DIR}"){
            sh(label: 'Install dependencies', script: 'npm install')
            sh(label: 'Run unit test', script: 'npm run test:unit | tee test-unit.tap')
            sh(label: 'Run behavior test', script: 'npm run test:behavior | tee test-behavior.tap')
            sh(label: 'Run types test', script: 'npm run test:types')
            sh(label: 'Generating test reporting', script: './node_modules/.bin/tap-mocha-reporter xunit < test-unit.tap > junit-unit.xml; ./node_modules/.bin/tap-mocha-reporter xunit < test-behavior.tap > junit-behavior.xml')
          }
        }
      }
      junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/**/junit-*.xml")
    }
  }
}
.ci/docker/Dockerfile (new file, 7 lines)
@@ -0,0 +1,7 @@
ARG NODE_JS_VERSION=10
FROM node:${NODE_JS_VERSION}-alpine

RUN apk --no-cache add git

# Create app directory
WORKDIR /usr/src/app
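
# For reference, the Jenkinsfile above builds this image roughly as:
#   docker build --build-arg NODE_JS_VERSION=<version> .ci/docker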
@@ -47,6 +47,10 @@
           type: yaml
           filename: .ci/test-matrix.yml
           name: NODE_JS_VERSION
+      - axis:
+          type: yaml
+          filename: .ci/test-matrix.yml
+          name: TEST_SUITE
       yaml-strategy:
         exclude-key: exclude
         filename: .ci/test-matrix.yml
.ci/packer_cache.sh (new file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

source /usr/local/bin/bash_standard_lib.sh

DOCKER_IMAGES="node:12-alpine
node:10-alpine
node:8-alpine
"

for di in ${DOCKER_IMAGES}
do
  (retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue"
done
.ci/run-elasticsearch.sh (new file, 177 lines)
@@ -0,0 +1,177 @@
#!/usr/bin/env bash
#
# Launch one or more Elasticsearch nodes via the Docker image,
# to form a cluster suitable for running the REST API tests.
#
# Export the ELASTICSEARCH_VERSION variable, eg. 'elasticsearch:8.0.0-SNAPSHOT'.
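#
# Example invocation (illustrative; not part of the original file):
#   ELASTICSEARCH_VERSION=elasticsearch:8.0.0-SNAPSHOT DETACH=true ./.ci/run-elasticsearch.sh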

if [[ -z "$ELASTICSEARCH_VERSION" ]]; then
  echo -e "\033[31;1mERROR:\033[0m Required environment variable [ELASTICSEARCH_VERSION] not set\033[0m"
  exit 1
fi

set -euxo pipefail

moniker=$(echo "$ELASTICSEARCH_VERSION" | tr -C "[:alnum:]" '-')
suffix=rest-test

NODE_NAME=${NODE_NAME-${moniker}node1}
MASTER_NODE_NAME=${MASTER_NODE_NAME-${NODE_NAME}}
CLUSTER_NAME=${CLUSTER_NAME-${moniker}${suffix}}
HTTP_PORT=${HTTP_PORT-9200}

ELASTIC_PASSWORD=${ELASTIC_PASSWORD-changeme}
SSL_CERT=${SSL_CERT-"$PWD/certs/testnode.crt"}
SSL_KEY=${SSL_KEY-"$PWD/certs/testnode.key"}
SSL_CA=${SSL_CA-"$PWD/certs/ca.crt"}

DETACH=${DETACH-false}
CLEANUP=${CLEANUP-false}

volume_name=${NODE_NAME}-${suffix}-data
network_default=${moniker}${suffix}
NETWORK_NAME=${NETWORK_NAME-"$network_default"}

set +x

function cleanup_volume {
  if [[ "$(docker volume ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
    (docker volume rm "$1") || true
  fi
}
function cleanup_node {
  if [[ "$(docker ps -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m"
    (docker container rm --force --volumes "$1") || true
    cleanup_volume "$1-${suffix}-data"
  fi
}
function cleanup_network {
  if [[ "$(docker network ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m"
    (docker network rm "$1") || true
  fi
}

function cleanup {
  if [[ "$DETACH" != "true" ]] || [[ "$1" == "1" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the node and volume on startup (1) OR on exit if not detached\033[0m"
    cleanup_node "$NODE_NAME"
  fi
  if [[ "$DETACH" != "true" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m"
    cleanup_network "$NETWORK_NAME"
  fi
};
trap "cleanup 0" EXIT

if [[ "$CLEANUP" == "true" ]]; then
  trap - EXIT
  if [[ -z "$(docker network ls -q -f name=${NETWORK_NAME})" ]]; then
    echo -e "\033[34;1mINFO:\033[0m $NETWORK_NAME is already deleted\033[0m"
    exit 0
  fi
  containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' ${NETWORK_NAME})
  while read -r container; do
    cleanup_node "$container"
  done <<< "$containers"
  cleanup_network "$NETWORK_NAME"
  echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m"
  exit 0
fi

echo -e "\033[34;1mINFO:\033[0m Making sure previous run leftover infrastructure is removed \033[0m"
cleanup 1

echo -e "\033[34;1mINFO:\033[0m Creating network $NETWORK_NAME if it does not exist already \033[0m"
docker network inspect "$NETWORK_NAME" > /dev/null 2>&1 || docker network create "$NETWORK_NAME"

environment=($(cat <<-END
  --env discovery.zen.ping.unicast.hosts=${NODE_NAME}
  --env xpack.security.enabled=false
  --env xpack.monitoring.enabled=false
  --env xpack.ml.enabled=false
  --env cluster.routing.allocation.disk.threshold_enabled=false
  --env bootstrap.memory_lock=true
  --env node.attr.testattr=test
  --env path.repo=/tmp
  --env repositories.url.allowed_urls=http://snapshot.test*
END
))

volumes=($(cat <<-END
  --volume $volume_name:/usr/share/elasticsearch/data
END
))

# if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
#   environment+=($(cat <<-END
#     --env ELASTIC_PASSWORD=$ELASTIC_PASSWORD
#     --env xpack.license.self_generated.type=trial
#     --env xpack.security.enabled=true
#     --env xpack.security.http.ssl.enabled=true
#     --env xpack.security.http.ssl.verification_mode=certificate
#     --env xpack.security.http.ssl.key=certs/testnode.key
#     --env xpack.security.http.ssl.certificate=certs/testnode.crt
#     --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt
#     --env xpack.security.transport.ssl.enabled=true
#     --env xpack.security.transport.ssl.key=certs/testnode.key
#     --env xpack.security.transport.ssl.certificate=certs/testnode.crt
#     --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt
# END
# ))
#   volumes+=($(cat <<-END
#     --volume $SSL_CERT:/usr/share/elasticsearch/config/certs/testnode.crt
#     --volume $SSL_KEY:/usr/share/elasticsearch/config/certs/testnode.key
#     --volume $SSL_CA:/usr/share/elasticsearch/config/certs/ca.crt
# END
# ))
# fi

url="http://$NODE_NAME"
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
  url="https://elastic:$ELASTIC_PASSWORD@$NODE_NAME"
fi

echo -e "\033[34;1mINFO:\033[0m Starting container $NODE_NAME \033[0m"
set -x
docker run \
  --name "$NODE_NAME" \
  --network "$NETWORK_NAME" \
  --env ES_JAVA_OPTS=-"Xms1g -Xmx1g" \
  "${environment[@]}" \
  "${volumes[@]}" \
  --publish "$HTTP_PORT":9200 \
  --ulimit nofile=65536:65536 \
  --ulimit memlock=-1:-1 \
  --detach="$DETACH" \
  --health-cmd="curl --silent --insecure --fail $url:9200/_cluster/health || exit 1" \
  --health-interval=2s \
  --health-retries=20 \
  --health-timeout=2s \
  --rm \
  docker.elastic.co/elasticsearch/"$ELASTICSEARCH_VERSION";
set +x

if [[ "$DETACH" == "true" ]]; then
  until [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "starting" ]]; do
    sleep 2;
    echo ""
    echo -e "\033[34;1mINFO:\033[0m waiting for node $NODE_NAME to be up\033[0m"
  done;
  # Always show the node getting started logs, this is very useful both on CI as well as while developing
  docker logs "$NODE_NAME"
  if [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "healthy" ]]; then
    cleanup 1
    echo
    echo -e "\033[31;1mERROR:\033[0m Failed to start ${ELASTICSEARCH_VERSION} in detached mode beyond health checks\033[0m"
    echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m"
    exit 1
  else
    echo
    echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${NODE_NAME} on docker network: ${NETWORK_NAME}\033[0m"
    echo -e "\033[32;1mSUCCESS:\033[0m Running on: ${url/$NODE_NAME/localhost}:${HTTP_PORT}\033[0m"
    exit 0
  fi
fi
@@ -8,6 +8,7 @@
 #
 # - $ELASTICSEARCH_VERSION
 # - $NODE_JS_VERSION
+# - $TEST_SUITE
 #

 set -eo pipefail
@@ -18,48 +19,49 @@ export CODECOV_TOKEN=$(vault read -field=token secret/clients-ci/elasticsearch-js
 unset VAULT_ROLE_ID VAULT_SECRET_ID VAULT_TOKEN
 set -x

 function cleanup {
-  docker container rm --force --volumes elasticsearch-oss > /dev/null 2>&1 || true
   docker container rm --force --volumes elasticsearch-js-oss > /dev/null 2>&1 || true
   docker network rm esnet-oss > /dev/null
 }

 trap cleanup EXIT

-# create network and volume
-docker network create esnet-oss
-
 # create client image
 docker build \
   --file .ci/Dockerfile \
   --tag elastic/elasticsearch-js \
   --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \
   .

 # run elasticsearch oss
+NODE_NAME="es1"
+repo=$(pwd)
+testnodecrt="/.ci/certs/testnode.crt"
+testnodekey="/.ci/certs/testnode.key"
+cacrt="/.ci/certs/ca.crt"
+
+elasticsearch_image="elasticsearch"
+elasticsearch_url="https://elastic:changeme@${NODE_NAME}:9200"
+if [[ $TEST_SUITE != "xpack" ]]; then
+  elasticsearch_image="elasticsearch"
+  elasticsearch_url="http://${NODE_NAME}:9200"
+fi
+
+docker network create esnet
+
 docker run \
   --rm \
   --env "node.attr.testattr=test" \
   --env "path.repo=/tmp" \
   --env "repositories.url.allowed_urls=http://snapshot.*" \
-  --env "discovery.zen.ping.unicast.hosts=elasticsearch" \
+  --env "discovery.zen.ping.unicast.hosts=${NODE_NAME}" \
   --env "xpack.security.enabled=false" \
   --env "xpack.monitoring.enabled=false" \
   --env "xpack.ml.enabled=false" \
   --env ES_JAVA_OPTS="-Xms1g -Xmx1g" \
-  --network=esnet-oss \
-  --name=elasticsearch-oss \
+  --network=esnet \
+  --name=$NODE_NAME \
   --detach \
   docker.elastic.co/elasticsearch/elasticsearch:${ELASTICSEARCH_VERSION}

 # run the client unit and oss integration test
 docker run \
-  --network=esnet-oss \
-  --env "TEST_ES_SERVER=http://elasticsearch-oss:9200" \
+  --network=esnet \
+  --env "TEST_ES_SERVER=${elasticsearch_url}" \
   --env "CODECOV_TOKEN" \
   --volume $(pwd):/usr/src/app \
   --volume /usr/src/app/node_modules \
-  --name elasticsearch-js-oss \
+  --name elasticsearch-js \
   --rm \
   elastic/elasticsearch-js \
   npm run ci
@@ -7,4 +7,7 @@ NODE_JS_VERSION:
   - 10
   - 8

+TEST_SUITE:
+  - oss
+
 exclude: ~
@@ -5,6 +5,9 @@ node_js:
   - "10"
   - "8"

+cache:
+  npm: false
+
 os:
   - windows
   - linux
@@ -14,7 +17,9 @@ install:

 script:
+  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then npm run license-checker; fi
   - npm run test
   - npm run lint
   - npm run test:coverage
+  - npm run test:types

 notifications:
   email:
@@ -2,7 +2,7 @@

 # Elasticsearch Node.js client

-[](http://standardjs.com/) [](https://clients-ci.elastic.co/job/elastic+elasticsearch-js+master/) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)
+[](http://standardjs.com/) [](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+master/) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)

 The official Node.js client for Elasticsearch.

@@ -64,7 +64,7 @@ _Default:_ `false`
 _Default:_ `false`

 |`sniffEndpoint`
-|`string` - Max request timeout for each request. +
+|`string` - Endpoint to ping during a sniff. +
 _Default:_ `'_nodes/_all/http'`

 |`sniffOnConnectionFault`
@@ -204,7 +204,7 @@ Sometimes you just need to inject a little snippet of your code and then continue
 class MyTransport extends Transport {
   request (params, options, callback) {
     // your code
-    super.request(params, options, callback)
+    return super.request(params, options, callback)
   }
 }
 ----
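For context, a transport extended this way is handed to the client through its `Transport` constructor option. A minimal sketch of the wiring (the client setup below is an illustration, not part of this diff):

----
const { Client, Transport } = require('@elastic/elasticsearch')

class MyTransport extends Transport {
  request (params, options, callback) {
    // your code
    return super.request(params, options, callback)
  }
}

const client = new Client({
  node: 'http://localhost:9200',
  Transport: MyTransport
})
----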
@@ -8,57 +8,83 @@ This can greatly increase the indexing speed.
 ----
 'use strict'

+require('array.prototype.flatmap').shim()
 const { Client } = require('@elastic/elasticsearch')
-const client = new Client({ node: 'http://localhost:9200' })
+const client = new Client({
+  node: 'http://localhost:9200'
+})

 async function run () {
-  const { body: bulkResponse } = await client.bulk({
-    // here we are forcing an index refresh,
-    // otherwise we will not get any result
-    // in the consequent search
-    refresh: true,
-    body: [
-      // operation to perform
-      { index: { _index: 'game-of-thrones' } },
-      // the document to index
-      {
-        character: 'Ned Stark',
-        quote: 'Winter is coming.'
-      },
-
-      { index: { _index: 'game-of-thrones' } },
-      {
-        character: 'Daenerys Targaryen',
-        quote: 'I am the blood of the dragon.'
-      },
-
-      { index: { _index: 'game-of-thrones' } },
-      {
-        character: 'Tyrion Lannister',
-        quote: 'A mind needs books like a sword needs a whetstone.'
-      }
-    ]
-  })
-
-  if (bulkResponse.errors) {
-    console.log(bulkResponse)
-    process.exit(1)
-  }
-
-  // Let's search!
-  const { body } = await client.search({
-    index: 'game-of-thrones',
+  await client.indices.create({
+    index: 'tweets',
     body: {
-      query: {
-        match: {
-          quote: 'winter'
+      mappings: {
+        properties: {
+          id: { type: 'integer' },
+          text: { type: 'text' },
+          user: { type: 'keyword' },
+          time: { type: 'date' }
         }
       }
     }
-  })
-
-  console.log(body.hits.hits)
+  }, { ignore: [400] })
+
+  const dataset = [{
+    id: 1,
+    text: 'If I fall, don\'t bring me back.',
+    user: 'jon',
+    date: new Date()
+  }, {
+    id: 2,
+    text: 'Winter is coming',
+    user: 'ned',
+    date: new Date()
+  }, {
+    id: 3,
+    text: 'A Lannister always pays his debts.',
+    user: 'tyrion',
+    date: new Date()
+  }, {
+    id: 4,
+    text: 'I am the blood of the dragon.',
+    user: 'daenerys',
+    date: new Date()
+  }, {
+    id: 5, // change this value to a string to see the bulk response with errors
+    text: 'A girl is Arya Stark of Winterfell. And I\'m going home.',
+    user: 'arya',
+    date: new Date()
+  }]
+
+  const body = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])
+
+  const { body: bulkResponse } = await client.bulk({ refresh: true, body })
+
+  if (bulkResponse.errors) {
+    const erroredDocuments = []
+    // The items array has the same order as the dataset we just indexed.
+    // The presence of the `error` key indicates that the operation
+    // that we did for the document has failed.
+    bulkResponse.items.forEach((action, i) => {
+      const operation = Object.keys(action)[0]
+      if (action[operation].error) {
+        erroredDocuments.push({
+          // If the status is 429 it means that you can retry the document,
+          // otherwise it's very likely a mapping error, and you should
+          // fix the document before trying it again.
+          status: action[operation].status,
+          error: action[operation].error,
+          operation: body[i * 2],
+          document: body[i * 2 + 1]
+        })
+      }
+    })
+    console.log(erroredDocuments)
+  }
+
+  const { body: count } = await client.count({ index: 'tweets' })
+  console.log(count)
 }

 run().catch(console.log)
 ----
docs/page_header.html (new file, 10 lines)
@@ -0,0 +1,10 @@
<p>
  <strong>WARNING</strong>: Version 5.x has passed its
  <a href="https://www.elastic.co/support/eol">EOL date</a>.
</p>
<p>
  This documentation is no longer being maintained and may be removed.
  If you are running this version, we strongly advise you to upgrade.
  For the latest information, see the
  <a href="../current/index.html">current release documentation</a>.
</p>
lib/Connection.d.ts (vendored, 2 lines changed)
@@ -23,6 +23,8 @@ interface ConnectionOptions {

 interface RequestOptions extends http.ClientRequestArgs {
   asStream?: boolean;
+  body?: any;
+  querystring?: string;
 }

 export interface AgentOptions {
@@ -356,9 +356,9 @@ class ConnectionPool {
       url: new URL(address),
       id: ids[i],
       roles: Object.assign({
-        [Connection.roles.MASTER]: true,
-        [Connection.roles.DATA]: true,
-        [Connection.roles.INGEST]: true,
+        [Connection.roles.MASTER]: false,
+        [Connection.roles.DATA]: false,
+        [Connection.roles.INGEST]: false,
         [Connection.roles.ML]: false
       }, roles)
     })
lib/Transport.d.ts (vendored, 2 lines changed)
@@ -80,7 +80,7 @@ export interface TransportRequestParams {
 }

 export interface TransportRequestOptions {
-  ignore?: [number];
+  ignore?: number[];
   requestTimeout?: number | string;
   maxRetries?: number;
   asStream?: boolean;
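In TypeScript, `[number]` is a single-element tuple type, so the old declaration rejected calls that pass more than one status code; `number[]` accepts any list. An illustrative call (assumed usage, mirroring the `{ ignore: [400] }` example earlier in this diff):

----
// delete an index, treating a 404 (index missing) as a non-error
const { body } = await client.indices.delete({ index: 'tweets' }, { ignore: [404] })
----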
@@ -108,7 +108,7 @@ class Transport {
       if (meta.aborted === true) return
       meta.connection = this.getConnection({ requestId: meta.request.id })
       if (meta.connection === null) {
-        return callback(new NoLivingConnectionsError('There are not living connections'), result)
+        return callback(new NoLivingConnectionsError('There are no living connections'), result)
       }

       // TODO: make this assignment FAST
@@ -123,15 +123,17 @@ class Transport {
           return callback(err, result)
         }
       }
-      headers['Content-Type'] = headers['Content-Type'] || 'application/json'

-      if (compression === 'gzip') {
-        if (isStream(params.body) === false) {
-          params.body = intoStream(params.body).pipe(createGzip())
-        } else {
-          params.body = params.body.pipe(createGzip())
+      if (params.body !== '') {
+        headers['Content-Type'] = headers['Content-Type'] || 'application/json'
+        if (compression === 'gzip') {
+          if (isStream(params.body) === false) {
+            params.body = intoStream(params.body).pipe(createGzip())
+          } else {
+            params.body = params.body.pipe(createGzip())
+          }
+          headers['Content-Encoding'] = compression
         }
-        headers['Content-Encoding'] = compression
       }

       if (isStream(params.body) === false) {
@@ -4,7 +4,7 @@
   "main": "index.js",
   "types": "index.d.ts",
   "homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html",
-  "version": "5.6.20",
+  "version": "5.6.21",
   "keywords": [
     "elasticsearch",
     "elastic",
@@ -20,13 +20,14 @@
     "test:unit": "tap test/unit/*.test.js -t 300 --no-coverage",
     "test:behavior": "tap test/behavior/*.test.js -t 300 --no-coverage",
     "test:integration": "tap test/integration/index.js -T --no-coverage",
-    "test:integration:report": "npm run test:integration | tap-mocha-reporter xunit > $WORKSPACE/test-report-junit.xml",
     "test:types": "tsc --project ./test/types/tsconfig.json",
     "test:coverage": "nyc npm run test:unit && nyc report --reporter=text-lcov > coverage.lcov && codecov",
     "lint": "standard",
     "lint:fix": "standard --fix",
     "ci": "npm run license-checker && npm test && npm run test:integration && npm run test:coverage",
-    "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'"
+    "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'",
+    "elasticsearch": "./scripts/es-docker.sh",
+    "elasticsearch:xpack": "./scripts/es-docker-platinum.sh"
   },
   "author": {
     "name": "Tomas Della Vedova",
@@ -6,31 +6,66 @@ testnodekey="/.ci/certs/testnode.key"
 cacrt="/.ci/certs/ca.crt"

 # pass `--clean` to remove the old snapshot
-if [ "$1" != "" ]; then
+if [ "$1" == "--clean" ]; then
   docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
 fi

-exec docker run \
-  --rm \
-  -e "node.attr.testattr=test" \
-  -e "path.repo=/tmp" \
-  -e "repositories.url.allowed_urls=http://snapshot.*" \
-  -e "discovery.type=single-node" \
-  -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-  -e "ELASTIC_PASSWORD=changeme" \
-  -e "xpack.security.enabled=true" \
-  -e "xpack.license.self_generated.type=trial" \
-  -e "xpack.security.http.ssl.enabled=true" \
-  -e "xpack.security.http.ssl.verification_mode=certificate" \
-  -e "xpack.security.http.ssl.key=certs/testnode.key" \
-  -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-  -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-  -e "xpack.security.transport.ssl.enabled=true" \
-  -e "xpack.security.transport.ssl.key=certs/testnode.key" \
-  -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-  -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-  -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-  -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-  -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-  -p 9200:9200 \
-  docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+# Create the 'elastic' network if it doesn't exist
+exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
+
+if [ "$1" == "--detach" ]; then
+  exec docker run \
+    --rm \
+    -e "node.attr.testattr=test" \
+    -e "path.repo=/tmp" \
+    -e "repositories.url.allowed_urls=http://snapshot.*" \
+    -e "discovery.type=single-node" \
+    -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
+    -e "ELASTIC_PASSWORD=changeme" \
+    -e "xpack.security.enabled=true" \
+    -e "xpack.license.self_generated.type=trial" \
+    -e "xpack.security.http.ssl.enabled=true" \
+    -e "xpack.security.http.ssl.verification_mode=certificate" \
+    -e "xpack.security.http.ssl.key=certs/testnode.key" \
+    -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
+    -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
+    -e "xpack.security.transport.ssl.enabled=true" \
+    -e "xpack.security.transport.ssl.key=certs/testnode.key" \
+    -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
+    -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
+    -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
+    -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
+    -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
+    -p 9200:9200 \
+    --detach \
+    --network=elastic \
+    --name=elasticsearch \
+    docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+else
+  exec docker run \
+    --rm \
+    -e "node.attr.testattr=test" \
+    -e "path.repo=/tmp" \
+    -e "repositories.url.allowed_urls=http://snapshot.*" \
+    -e "discovery.type=single-node" \
+    -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
+    -e "ELASTIC_PASSWORD=changeme" \
+    -e "xpack.security.enabled=true" \
+    -e "xpack.license.self_generated.type=trial" \
+    -e "xpack.security.http.ssl.enabled=true" \
+    -e "xpack.security.http.ssl.verification_mode=certificate" \
+    -e "xpack.security.http.ssl.key=certs/testnode.key" \
+    -e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
+    -e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
+    -e "xpack.security.transport.ssl.enabled=true" \
+    -e "xpack.security.transport.ssl.key=certs/testnode.key" \
+    -e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
+    -e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
+    -v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
+    -v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
+    -v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
+    -p 9200:9200 \
+    --network=elastic \
+    --name=elasticsearch \
+    docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+fi
@@ -1,15 +1,44 @@
 #!/bin/bash

-exec docker run \
-  --rm \
-  -e "node.attr.testattr=test" \
-  -e "path.repo=/tmp" \
-  -e "repositories.url.allowed_urls=http://snapshot.*" \
-  -e "discovery.zen.ping.unicast.hosts=elasticsearch" \
-  -e "xpack.security.enabled=false" \
-  -e "xpack.monitoring.enabled=false" \
-  -e "xpack.ml.enabled=false" \
-  -p 9200:9200 \
-  --network=elastic \
-  --name=elasticsearch \
-  docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+# Images are cached locally, it may be needed
+# to delete an old image and download again
+# the latest snapshot.
+
+# pass `--clean` to remove the old snapshot
+if [ "$1" == "--clean" ]; then
+  docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
+fi
+
+# Create the 'elastic' network if it doesn't exist
+exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
+
+if [ "$1" == "--detach" ]; then
+  exec docker run \
+    --rm \
+    -e "node.attr.testattr=test" \
+    -e "path.repo=/tmp" \
+    -e "repositories.url.allowed_urls=http://snapshot.*" \
+    -e "discovery.zen.ping.unicast.hosts=elasticsearch" \
+    -e "xpack.security.enabled=false" \
+    -e "xpack.monitoring.enabled=false" \
+    -e "xpack.ml.enabled=false" \
+    -p 9200:9200 \
+    --detach \
+    --network=elastic \
+    --name=elasticsearch \
+    docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+else
+  exec docker run \
+    --rm \
+    -e "node.attr.testattr=test" \
+    -e "path.repo=/tmp" \
+    -e "repositories.url.allowed_urls=http://snapshot.*" \
+    -e "discovery.zen.ping.unicast.hosts=elasticsearch" \
+    -e "xpack.security.enabled=false" \
+    -e "xpack.monitoring.enabled=false" \
+    -e "xpack.ml.enabled=false" \
+    -p 9200:9200 \
+    --network=elastic \
+    --name=elasticsearch \
+    docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+fi
@@ -34,7 +34,6 @@ test('Should emit a request event when a request is performed', t => {
       body: '',
       querystring: 'q=foo%3Abar',
       headers: {
-        'Content-Type': 'application/json',
         'Content-Length': '0'
       }
     },
@@ -87,7 +86,6 @@ test('Should emit a response event in case of a successful response', t => {
       body: '',
       querystring: 'q=foo%3Abar',
       headers: {
-        'Content-Type': 'application/json',
         'Content-Length': '0'
       }
     },
@@ -138,7 +136,6 @@ test('Should emit a response event with the error set', t => {
       body: '',
       querystring: 'q=foo%3Abar',
       headers: {
-        'Content-Type': 'application/json',
         'Content-Length': '0'
       }
     },
@@ -1865,6 +1865,55 @@ test('Compress request', t => {
     }
   })

+  t.test('Should skip the compression for empty strings/null/undefined', t => {
+    t.plan(9)
+
+    function handler (req, res) {
+      t.strictEqual(req.headers['content-encoding'], undefined)
+      t.strictEqual(req.headers['content-type'], undefined)
+      res.end()
+    }
+
+    buildServer(handler, ({ port }, server) => {
+      const pool = new ConnectionPool({ Connection })
+      pool.addConnection(`http://localhost:${port}`)
+
+      const transport = new Transport({
+        emit: () => {},
+        connectionPool: pool,
+        serializer: new Serializer(),
+        maxRetries: 3,
+        compression: 'gzip',
+        requestTimeout: 30000,
+        sniffInterval: false,
+        sniffOnStart: false
+      })
+
+      transport.request({
+        method: 'DELETE',
+        path: '/hello',
+        body: ''
+      }, (err, { body }) => {
+        t.error(err)
+        transport.request({
+          method: 'GET',
+          path: '/hello',
+          body: null
+        }, (err, { body }) => {
+          t.error(err)
+          transport.request({
+            method: 'GET',
+            path: '/hello',
+            body: undefined
+          }, (err, { body }) => {
+            t.error(err)
+            server.stop()
+          })
+        })
+      })
+    })
+  })
+
   t.end()
 })