Compare commits


16 Commits

Author SHA1 Message Date
b390193dcc Bumped v6.8.4 2019-12-03 14:38:58 +01:00
df76154388 X-Opaque-Id support (#997)
* Added X-Opaque-Id support

* Updated type definitions

* Updated test

* Updated docs
2019-12-03 14:33:33 +01:00
3e2a823a72 Updated CI configuration 2019-10-28 15:41:42 +01:00
8a1bb2665e Updated jobs/defaults.yml to use the new job matrix 2019-10-28 14:46:40 +01:00
3acee8b266 New CI configuration (#996)
* Removed old files

* Updated README

* New CI configuration

* Updated run-test script
2019-10-28 14:46:32 +01:00
60a1f298d7 Move back to jjbb (#995)
* Removed Jenkins pipelines configuration

* Restored jjbb definitions
2019-10-28 14:46:03 +01:00
e7c40dd459 Bumped v6.8.3 2019-10-02 11:20:21 +02:00
86361daea5 Fix issue; node roles are defaulting to true when undefined (fal… (#967)
* Fix issue; nodeFilter was unable to filter because the master, data, and ingest roles defaulted to true even when they were false on the node.

* Test nodesToHost of BaseConnectionPool correctly maps node roles
2019-09-18 08:46:44 +02:00
09c2ff8bf3 Fix inaccurate description sniffEndpoint (#959) 2019-09-16 17:43:46 +02:00
75d85cae19 Add body and querystring to RequestOptions (#957) 2019-09-11 11:30:42 +02:00
e30079f64d Updated cron job definition (#939)
* Updated cron job definition

* Update .ci/Jenkinsfile

Co-Authored-By: Victor Martinez <victormartinezrubio@gmail.com>
2019-08-13 10:55:47 +02:00
610bf851d2 Updated .travis.yml (#938) 2019-08-09 14:08:37 +02:00
b91ff8a6cc Merge branch '6.x' of https://github.com/elastic/elasticsearch-js into 6.x 2019-08-09 12:15:12 +02:00
b39f29b2b0 Use Jenkins pipelines (#937) 2019-08-09 12:15:00 +02:00
ef69bbd216 Disable travis cache (#929) 2019-08-02 11:50:15 +02:00
6194119e62 Updated error handling in bulk example (#927) 2019-08-01 12:21:06 +02:00
22 changed files with 593 additions and 161 deletions

.ci/docker/Dockerfile (new file)

@@ -0,0 +1,7 @@
ARG NODE_JS_VERSION=10
FROM node:${NODE_JS_VERSION}-alpine
RUN apk --no-cache add git
# Create app directory
WORKDIR /usr/src/app

@@ -47,6 +47,10 @@
type: yaml
filename: .ci/test-matrix.yml
name: NODE_JS_VERSION
- axis:
type: yaml
filename: .ci/test-matrix.yml
name: TEST_SUITE
yaml-strategy:
exclude-key: exclude
filename: .ci/test-matrix.yml

.ci/packer_cache.sh (new file)

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
source /usr/local/bin/bash_standard_lib.sh
DOCKER_IMAGES="node:12-alpine
node:10-alpine
node:8-alpine
"
for di in ${DOCKER_IMAGES}
do
(retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue"
done

.ci/run-elasticsearch.sh (new file)

@@ -0,0 +1,175 @@
#!/usr/bin/env bash
#
# Launch one or more Elasticsearch nodes via the Docker image,
# to form a cluster suitable for running the REST API tests.
#
# Export the ELASTICSEARCH_VERSION variable, eg. 'elasticsearch:8.0.0-SNAPSHOT'.
if [[ -z "$ELASTICSEARCH_VERSION" ]]; then
echo -e "\033[31;1mERROR:\033[0m Required environment variable [ELASTICSEARCH_VERSION] not set\033[0m"
exit 1
fi
set -euxo pipefail
moniker=$(echo "$ELASTICSEARCH_VERSION" | tr -C "[:alnum:]" '-')
suffix=rest-test
NODE_NAME=${NODE_NAME-${moniker}node1}
MASTER_NODE_NAME=${MASTER_NODE_NAME-${NODE_NAME}}
CLUSTER_NAME=${CLUSTER_NAME-${moniker}${suffix}}
HTTP_PORT=${HTTP_PORT-9200}
ELASTIC_PASSWORD=${ELASTIC_PASSWORD-changeme}
SSL_CERT=${SSL_CERT-"$PWD/certs/testnode.crt"}
SSL_KEY=${SSL_KEY-"$PWD/certs/testnode.key"}
SSL_CA=${SSL_CA-"$PWD/certs/ca.crt"}
DETACH=${DETACH-false}
CLEANUP=${CLEANUP-false}
volume_name=${NODE_NAME}-${suffix}-data
network_default=${moniker}${suffix}
NETWORK_NAME=${NETWORK_NAME-"$network_default"}
set +x
function cleanup_volume {
if [[ "$(docker volume ls -q -f name=$1)" ]]; then
echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
(docker volume rm "$1") || true
fi
}
function cleanup_node {
if [[ "$(docker ps -q -f name=$1)" ]]; then
echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m"
(docker container rm --force --volumes "$1") || true
cleanup_volume "$1-${suffix}-data"
fi
}
function cleanup_network {
if [[ "$(docker network ls -q -f name=$1)" ]]; then
echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m"
(docker network rm "$1") || true
fi
}
function cleanup {
if [[ "$DETACH" != "true" ]] || [[ "$1" == "1" ]]; then
echo -e "\033[34;1mINFO:\033[0m clean the node and volume on startup (1) OR on exit if not detached\033[0m"
cleanup_node "$NODE_NAME"
fi
if [[ "$DETACH" != "true" ]]; then
echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m"
cleanup_network "$NETWORK_NAME"
fi
};
trap "cleanup 0" EXIT
if [[ "$CLEANUP" == "true" ]]; then
trap - EXIT
if [[ -z "$(docker network ls -q -f name=${NETWORK_NAME})" ]]; then
echo -e "\033[34;1mINFO:\033[0m $NETWORK_NAME is already deleted\033[0m"
exit 0
fi
containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' ${NETWORK_NAME})
while read -r container; do
cleanup_node "$container"
done <<< "$containers"
cleanup_network "$NETWORK_NAME"
echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m"
exit 0
fi
echo -e "\033[34;1mINFO:\033[0m Making sure previous run leftover infrastructure is removed \033[0m"
cleanup 1
echo -e "\033[34;1mINFO:\033[0m Creating network $NETWORK_NAME if it does not exist already \033[0m"
docker network inspect "$NETWORK_NAME" > /dev/null 2>&1 || docker network create "$NETWORK_NAME"
environment=($(cat <<-END
--env node.name=$NODE_NAME
--env cluster.name=$CLUSTER_NAME
--env cluster.routing.allocation.disk.threshold_enabled=false
--env bootstrap.memory_lock=true
--env node.attr.testattr=test
--env path.repo=/tmp
--env repositories.url.allowed_urls=http://snapshot.test*
END
))
volumes=($(cat <<-END
--volume $volume_name:/usr/share/elasticsearch/data
END
))
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
environment+=($(cat <<-END
--env ELASTIC_PASSWORD=$ELASTIC_PASSWORD
--env xpack.license.self_generated.type=trial
--env xpack.security.enabled=true
--env xpack.security.http.ssl.enabled=true
--env xpack.security.http.ssl.verification_mode=certificate
--env xpack.security.http.ssl.key=certs/testnode.key
--env xpack.security.http.ssl.certificate=certs/testnode.crt
--env xpack.security.http.ssl.certificate_authorities=certs/ca.crt
--env xpack.security.transport.ssl.enabled=true
--env xpack.security.transport.ssl.key=certs/testnode.key
--env xpack.security.transport.ssl.certificate=certs/testnode.crt
--env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt
END
))
volumes+=($(cat <<-END
--volume $SSL_CERT:/usr/share/elasticsearch/config/certs/testnode.crt
--volume $SSL_KEY:/usr/share/elasticsearch/config/certs/testnode.key
--volume $SSL_CA:/usr/share/elasticsearch/config/certs/ca.crt
END
))
fi
url="http://$NODE_NAME"
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
url="https://elastic:$ELASTIC_PASSWORD@$NODE_NAME"
fi
echo -e "\033[34;1mINFO:\033[0m Starting container $NODE_NAME \033[0m"
set -x
docker run \
--name "$NODE_NAME" \
--network "$NETWORK_NAME" \
--env ES_JAVA_OPTS=-"Xms1g -Xmx1g" \
"${environment[@]}" \
"${volumes[@]}" \
--publish "$HTTP_PORT":9200 \
--ulimit nofile=65536:65536 \
--ulimit memlock=-1:-1 \
--detach="$DETACH" \
--health-cmd="curl --silent --insecure --fail $url:9200/_cluster/health || exit 1" \
--health-interval=2s \
--health-retries=20 \
--health-timeout=2s \
--rm \
docker.elastic.co/elasticsearch/"$ELASTICSEARCH_VERSION";
set +x
if [[ "$DETACH" == "true" ]]; then
until [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "starting" ]]; do
sleep 2;
echo ""
echo -e "\033[34;1mINFO:\033[0m waiting for node $NODE_NAME to be up\033[0m"
done;
# Always show the node getting started logs, this is very useful both on CI as well as while developing
docker logs "$NODE_NAME"
if [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "healthy" ]]; then
cleanup 1
echo
echo -e "\033[31;1mERROR:\033[0m Failed to start ${ELASTICSEARCH_VERSION} in detached mode beyond health checks\033[0m"
echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m"
exit 1
else
echo
echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${NODE_NAME} on docker network: ${NETWORK_NAME}\033[0m"
echo -e "\033[32;1mSUCCESS:\033[0m Running on: ${url/$NODE_NAME/localhost}:${HTTP_PORT}\033[0m"
exit 0
fi
fi

@@ -8,6 +8,7 @@
#
# - $ELASTICSEARCH_VERSION
# - $NODE_JS_VERSION
# - $TEST_SUITE
#
set -eo pipefail
@@ -18,93 +19,41 @@ export CODECOV_TOKEN=$(vault read -field=token secret/clients-ci/elasticsearch-j
unset VAULT_ROLE_ID VAULT_SECRET_ID VAULT_TOKEN
set -x
function cleanup {
docker container rm --force --volumes elasticsearch-oss > /dev/null 2>&1 || true
docker container rm --force --volumes elasticsearch-platinum > /dev/null 2>&1 || true
docker container rm --force --volumes elasticsearch-js-oss > /dev/null 2>&1 || true
docker container rm --force --volumes elasticsearch-js-platinum > /dev/null 2>&1 || true
docker network rm esnet-oss > /dev/null
docker network rm esnet-platinum > /dev/null
}
trap cleanup EXIT
# create network and volume
docker network create esnet-oss
docker network create esnet-platinum
# create client image
docker build \
--file .ci/Dockerfile \
--tag elastic/elasticsearch-js \
--build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \
.
# run elasticsearch oss
docker run \
--rm \
--env "node.attr.testattr=test" \
--env "path.repo=/tmp" \
--env "repositories.url.allowed_urls=http://snapshot.*" \
--env "discovery.type=single-node" \
--network=esnet-oss \
--name=elasticsearch-oss \
--detach \
docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTICSEARCH_VERSION}
# run elasticsearch platinum
NODE_NAME="es1"
repo=$(pwd)
testnodecrt="/.ci/certs/testnode.crt"
testnodekey="/.ci/certs/testnode.key"
cacrt="/.ci/certs/ca.crt"
docker run \
--rm \
--env "node.attr.testattr=test" \
--env "path.repo=/tmp" \
--env "repositories.url.allowed_urls=http://snapshot.*" \
--env "discovery.type=single-node" \
--env "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
--env "ELASTIC_PASSWORD=changeme" \
--env "xpack.security.enabled=true" \
--env "xpack.license.self_generated.type=trial" \
--env "xpack.security.http.ssl.enabled=true" \
--env "xpack.security.http.ssl.verification_mode=certificate" \
--env "xpack.security.http.ssl.key=certs/testnode.key" \
--env "xpack.security.http.ssl.certificate=certs/testnode.crt" \
--env "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
--env "xpack.security.transport.ssl.enabled=true" \
--env "xpack.security.transport.ssl.key=certs/testnode.key" \
--env "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
--env "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
--volume "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
--volume "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
--volume "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
--network=esnet-platinum \
--name=elasticsearch-platinum \
--detach \
docker.elastic.co/elasticsearch/elasticsearch:${ELASTICSEARCH_VERSION}
elasticsearch_image="elasticsearch"
elasticsearch_url="https://elastic:changeme@${NODE_NAME}:9200"
if [[ $TEST_SUITE != "xpack" ]]; then
elasticsearch_image="elasticsearch-oss"
elasticsearch_url="http://${NODE_NAME}:9200"
fi
ELASTICSEARCH_VERSION="${elasticsearch_image}:${ELASTICSEARCH_VERSION}" \
NODE_NAME="${NODE_NAME}" \
NETWORK_NAME="esnet" \
DETACH=true \
SSL_CERT="${repo}${testnodecrt}" \
SSL_KEY="${repo}${testnodekey}" \
SSL_CA="${repo}${cacrt}" \
bash .ci/run-elasticsearch.sh
# run the client unit and oss integration test
docker run \
--network=esnet-oss \
--env "TEST_ES_SERVER=http://elasticsearch-oss:9200" \
--network=esnet \
--env "TEST_ES_SERVER=${elasticsearch_url}" \
--env "CODECOV_TOKEN" \
--volume $repo:/usr/src/app \
--volume /usr/src/app/node_modules \
--name elasticsearch-js-oss \
--name elasticsearch-js \
--rm \
elastic/elasticsearch-js \
npm run ci
# run the client platinum integration test
docker run \
--network=esnet-platinum \
--env "TEST_ES_SERVER=https://elastic:changeme@elasticsearch-platinum:9200" \
--volume $repo:/usr/src/app \
--volume /usr/src/app/node_modules \
--name elasticsearch-js-platinum \
--rm \
elastic/elasticsearch-js \
npm run test:integration

@@ -1,10 +1,14 @@
---
ELASTICSEARCH_VERSION:
- 6.8.1
- 6.8.4
NODE_JS_VERSION:
- 12
- 10
- 8
TEST_SUITE:
- oss
- xpack
exclude: ~

@@ -5,6 +5,9 @@ node_js:
- "10"
- "8"
cache:
npm: false
os:
- windows
- linux
@@ -14,7 +17,9 @@ install:
script:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then npm run license-checker; fi
- npm run test
- npm run lint
- npm run test:coverage
- npm run test:types
notifications:
email:

@@ -2,7 +2,7 @@
# Elasticsearch Node.js client
[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/job/elastic+elasticsearch-js+master/badge/icon)](https://clients-ci.elastic.co/job/elastic+elasticsearch-js+master/) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch)
[![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat)](http://standardjs.com/) [![Build Status](https://clients-ci.elastic.co/buildStatus/icon?job=elastic%2Belasticsearch-js%2Bmaster)](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+master/) [![codecov](https://codecov.io/gh/elastic/elasticsearch-js/branch/master/graph/badge.svg)](https://codecov.io/gh/elastic/elasticsearch-js) [![NPM downloads](https://img.shields.io/npm/dm/@elastic/elasticsearch.svg?style=flat)](https://www.npmjs.com/package/@elastic/elasticsearch)
The official Node.js client for Elasticsearch.

@@ -86,7 +86,7 @@ _Default:_ `false`
_Default:_ `false`
|`sniffEndpoint`
|`string` - Max request timeout for each request. +
|`string` - Endpoint to ping during a sniff. +
_Default:_ `'_nodes/_all/http'`
|`sniffOnConnectionFault`
@@ -173,6 +173,11 @@ function generateRequestId (params, options) {
|`string` - The name to identify the client instance in the events. +
_Default:_ `elasticsearch-js`
|`opaqueIdPrefix`
|`string` - A string that will be used to prefix any `X-Opaque-Id` header. +
See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html#_x-opaque-id_support[`X-Opaque-Id` support] for more details. +
_Default:_ `null`
|`headers`
|`object` - A set of custom headers to send in every request. +
_Default:_ `{}`
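
For reference, the configuration options shown in this hunk can be combined when constructing a client. A minimal sketch, assuming a local node URL and illustrative values for `name`, `opaqueIdPrefix`, and `headers` (only the option names come from the documentation above):

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',      // placeholder node URL
  name: 'my-service',                 // identifies this client instance in events
  sniffEndpoint: '_nodes/_all/http',  // endpoint to ping during a sniff (the default)
  opaqueIdPrefix: 'my-service::',     // prepended to every X-Opaque-Id header
  headers: { 'x-custom-header': '1' } // custom headers sent with every request
})
----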

@@ -8,57 +8,83 @@ This can greatly increase the indexing speed.
----
'use strict'
require('array.prototype.flatmap').shim()
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })
const client = new Client({
node: 'http://localhost:9200'
})
async function run () {
const { body: bulkResponse } = await client.bulk({
// here we are forcing an index refresh,
// otherwise we will not get any result
// in the consequent search
refresh: true,
body: [
// operation to perform
{ index: { _index: 'game-of-thrones' } },
// the document to index
{
character: 'Ned Stark',
quote: 'Winter is coming.'
},
{ index: { _index: 'game-of-thrones' } },
{
character: 'Daenerys Targaryen',
quote: 'I am the blood of the dragon.'
},
{ index: { _index: 'game-of-thrones' } },
{
character: 'Tyrion Lannister',
quote: 'A mind needs books like a sword needs a whetstone.'
}
]
})
if (bulkResponse.errors) {
console.log(bulkResponse)
process.exit(1)
}
// Let's search!
const { body } = await client.search({
index: 'game-of-thrones',
await client.indices.create({
index: 'tweets',
body: {
query: {
match: {
quote: 'winter'
mappings: {
properties: {
id: { type: 'integer' },
text: { type: 'text' },
user: { type: 'keyword' },
time: { type: 'date' }
}
}
}
})
}, { ignore: [400] })
console.log(body.hits.hits)
const dataset = [{
id: 1,
text: 'If I fall, don\'t bring me back.',
user: 'jon',
date: new Date()
}, {
id: 2,
text: 'Winter is coming',
user: 'ned',
date: new Date()
}, {
id: 3,
text: 'A Lannister always pays his debts.',
user: 'tyrion',
date: new Date()
}, {
id: 4,
text: 'I am the blood of the dragon.',
user: 'daenerys',
date: new Date()
}, {
id: 5, // change this value to a string to see the bulk response with errors
text: 'A girl is Arya Stark of Winterfell. And I\'m going home.',
user: 'arya',
date: new Date()
}]
const body = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])
const { body: bulkResponse } = await client.bulk({ refresh: true, body })
if (bulkResponse.errors) {
const erroredDocuments = []
// The items array has the same order as the dataset we just indexed.
// The presence of the `error` key indicates that the operation
// that we did for the document has failed.
bulkResponse.items.forEach((action, i) => {
const operation = Object.keys(action)[0]
if (action[operation].error) {
erroredDocuments.push({
// If the status is 429 it means that you can retry the document,
// otherwise it's very likely a mapping error, and you should
// fix the document before trying it again.
status: action[operation].status,
error: action[operation].error,
operation: body[i * 2],
document: body[i * 2 + 1]
})
}
})
console.log(erroredDocuments)
}
const { body: count } = await client.count({ index: 'tweets' })
console.log(count)
}
run().catch(console.log)
----
----

@@ -248,3 +248,46 @@ child.search({
if (err) console.log(err)
})
----
=== X-Opaque-Id support
To improve the overall observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this will allow you to discover this identifier in the https://www.elastic.co/guide/en/elasticsearch/reference/master/logging.html#deprecation-logging[deprecation logs], help you with https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-slowlog.html#_identifying_search_slow_log_origin[identifying search slow log origin] as well as https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html#_identifying_running_tasks[identifying running tasks].
The `X-Opaque-Id` should be configured in each request; to do that, you can use the `opaqueId` option, as you can see in the following example. +
The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`.
[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
node: 'http://localhost:9200'
})
client.search({
index: 'my-index',
body: { foo: 'bar' }
}, {
opaqueId: 'my-search'
}, (err, result) => {
if (err) console.log(err)
})
----
Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a specific string, for example when you need to identify a specific client or server. To do this, the client offers a top-level configuration option: `opaqueIdPrefix`. +
In the following example, the resulting header will be `{ 'X-Opaque-Id': 'proxy-client::my-search' }`.
[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
node: 'http://localhost:9200',
opaqueIdPrefix: 'proxy-client::'
})
client.search({
index: 'my-index',
body: { foo: 'bar' }
}, {
opaqueId: 'my-search'
}, (err, result) => {
if (err) console.log(err)
})
----
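
The examples above use the callback style; omitting the callback makes the client return a promise, and the per-request options (including `opaqueId`) are passed the same way. A brief sketch under that assumption, reusing the placeholder index and identifiers from the examples above:

[source,js]
----
const { Client } = require('@elastic/elasticsearch')

const client = new Client({
  node: 'http://localhost:9200',
  opaqueIdPrefix: 'proxy-client::'
})

async function run () {
  // The resulting header is expected to be 'proxy-client::my-search'.
  const result = await client.search({
    index: 'my-index',
    body: { foo: 'bar' }
  }, {
    opaqueId: 'my-search'
  })
  console.log(result.body)
}

run().catch(console.log)
----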

index.d.ts

@@ -94,6 +94,7 @@ interface ClientOptions {
nodeFilter?: nodeFilterFn;
nodeSelector?: nodeSelectorFn | string;
headers?: anyObject;
opaqueIdPrefix?: string;
generateRequestId?: generateRequestIdFn;
name?: string;
auth?: BasicAuth | ApiKeyAuth;

@@ -79,7 +79,8 @@ class Client extends EventEmitter {
nodeSelector: 'round-robin',
generateRequestId: null,
name: 'elasticsearch-js',
auth: null
auth: null,
opaqueIdPrefix: null
}, opts)
this[kInitialOptions] = options
@@ -121,7 +122,8 @@
nodeFilter: options.nodeFilter,
nodeSelector: options.nodeSelector,
generateRequestId: options.generateRequestId,
name: options.name
name: options.name,
opaqueIdPrefix: options.opaqueIdPrefix
})
const apis = buildApi({

lib/Connection.d.ts

@@ -25,6 +25,8 @@ interface ConnectionOptions {
interface RequestOptions extends http.ClientRequestArgs {
asStream?: boolean;
body?: any;
querystring?: string;
}
export interface AgentOptions {

lib/Transport.d.ts

@@ -38,6 +38,7 @@ interface TransportOptions {
headers?: anyObject;
generateRequestId?: generateRequestIdFn;
name: string;
opaqueIdPrefix?: string;
}
export interface RequestEvent<T = any, C = any> {
@@ -90,6 +91,7 @@ export interface TransportRequestOptions {
id?: any;
context?: any;
warnings?: [string];
opaqueId?: string;
}
export interface TransportRequestCallback {
@@ -121,6 +123,7 @@ export default class Transport {
compression: 'gzip' | false;
sniffInterval: number;
sniffOnConnectionFault: boolean;
opaqueIdPrefix: string | null;
sniffEndpoint: string;
_sniffEnabled: boolean;
_nextSniff: number;

@@ -41,6 +41,7 @@ class Transport {
this.sniffEndpoint = opts.sniffEndpoint
this.generateRequestId = opts.generateRequestId || generateRequestId()
this.name = opts.name
this.opaqueIdPrefix = opts.opaqueIdPrefix
this.nodeFilter = opts.nodeFilter || defaultNodeFilter
if (typeof opts.nodeSelector === 'function') {
@@ -114,6 +115,12 @@
// TODO: make this assignment FAST
const headers = Object.assign({}, this.headers, options.headers)
if (options.opaqueId !== undefined) {
headers['X-Opaque-Id'] = this.opaqueIdPrefix !== null
? this.opaqueIdPrefix + options.opaqueId
: options.opaqueId
}
// handle json body
if (params.body != null) {
if (shouldSerialize(params.body) === true) {
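
The new branch above builds the `X-Opaque-Id` header by concatenating the client-level prefix, when one is set, with the per-request id. A standalone sketch of that computation; `buildOpaqueId` is a hypothetical helper and the values are illustrative:

[source,js]
----
// Mirrors the logic added to Transport#request above (hypothetical helper).
function buildOpaqueId (opaqueIdPrefix, opaqueId) {
  if (opaqueId === undefined) return undefined
  return opaqueIdPrefix !== null
    ? opaqueIdPrefix + opaqueId
    : opaqueId
}

console.log(buildOpaqueId(null, 'my-search'))              // 'my-search'
console.log(buildOpaqueId('proxy-client::', 'my-search'))  // 'proxy-client::my-search'
----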

@@ -212,9 +212,9 @@ class BaseConnectionPool {
url: new URL(address),
id: ids[i],
roles: Object.assign({
[Connection.roles.MASTER]: true,
[Connection.roles.DATA]: true,
[Connection.roles.INGEST]: true,
[Connection.roles.MASTER]: false,
[Connection.roles.DATA]: false,
[Connection.roles.INGEST]: false,
[Connection.roles.ML]: false
}, roles)
})
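
The change above flips the fallback roles from `true` to `false`, so a node that does not advertise a role is no longer treated as having it, while the roles it does report still win through `Object.assign`. A small sketch of that merge, using made-up role objects:

[source,js]
----
const defaults = { master: false, data: false, ingest: false, ml: false }

// Roles reported by the node override the defaults.
console.log(Object.assign({}, defaults, { master: true, data: true }))
// => { master: true, data: true, ingest: false, ml: false }

// A node that reports no roles now gets all-false instead of all-true.
console.log(Object.assign({}, defaults, {}))
// => { master: false, data: false, ingest: false, ml: false }
----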

@@ -4,7 +4,7 @@
"main": "index.js",
"types": "index.d.ts",
"homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html",
"version": "6.8.2",
"version": "6.8.4",
"keywords": [
"elasticsearch",
"elastic",
@@ -20,13 +20,14 @@
"test:unit": "tap test/unit/*.test.js -t 300 --no-coverage",
"test:behavior": "tap test/behavior/*.test.js -t 300 --no-coverage",
"test:integration": "tap test/integration/index.js -T --no-coverage",
"test:integration:report": "npm run test:integration | tap-mocha-reporter xunit > $WORKSPACE/test-report-junit.xml",
"test:types": "tsc --project ./test/types/tsconfig.json",
"test:coverage": "nyc tap test/unit/*.test.js test/behavior/*.test.js -t 300 && nyc report --reporter=text-lcov > coverage.lcov && codecov",
"lint": "standard",
"lint:fix": "standard --fix",
"ci": "npm run license-checker && npm test && npm run test:integration && npm run test:coverage",
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'"
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'",
"elasticsearch": "./scripts/es-docker.sh",
"elasticsearch:xpack": "./scripts/es-docker-platinum.sh"
},
"author": {
"name": "Tomas Della Vedova",

@@ -6,31 +6,66 @@ testnodekey="/.ci/certs/testnode.key"
cacrt="/.ci/certs/ca.crt"
# pass `--clean` to remove the old snapshot
if [ "$1" != "" ]; then
if [ "$1" == "--clean" ]; then
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
fi
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-e "ELASTIC_PASSWORD=changeme" \
-e "xpack.security.enabled=true" \
-e "xpack.license.self_generated.type=trial" \
-e "xpack.security.http.ssl.enabled=true" \
-e "xpack.security.http.ssl.verification_mode=certificate" \
-e "xpack.security.http.ssl.key=certs/testnode.key" \
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-e "xpack.security.transport.ssl.enabled=true" \
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-p 9200:9200 \
docker.elastic.co/elasticsearch/elasticsearch:6.7.1
# Create the 'elastic' network if it doesn't exist
docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
if [ "$1" == "--detach" ]; then
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-e "ELASTIC_PASSWORD=changeme" \
-e "xpack.security.enabled=true" \
-e "xpack.license.self_generated.type=trial" \
-e "xpack.security.http.ssl.enabled=true" \
-e "xpack.security.http.ssl.verification_mode=certificate" \
-e "xpack.security.http.ssl.key=certs/testnode.key" \
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-e "xpack.security.transport.ssl.enabled=true" \
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-p 9200:9200 \
--detach \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
else
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
-e "ELASTIC_PASSWORD=changeme" \
-e "xpack.security.enabled=true" \
-e "xpack.license.self_generated.type=trial" \
-e "xpack.security.http.ssl.enabled=true" \
-e "xpack.security.http.ssl.verification_mode=certificate" \
-e "xpack.security.http.ssl.key=certs/testnode.key" \
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
-e "xpack.security.transport.ssl.enabled=true" \
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
-p 9200:9200 \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
fi

@@ -1,12 +1,38 @@
#!/bin/bash
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-p 9200:9200 \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:6.7.1
# Images are cached locally; you may need to delete an old image
# and download the latest snapshot again.
# pass `--clean` to remove the old snapshot
if [ "$1" == "--clean" ]; then
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
fi
# Create the 'elastic' network if it doesn't exist
docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
if [ "$1" == "--detach" ]; then
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-p 9200:9200 \
--detach \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
else
exec docker run \
--rm \
-e "node.attr.testattr=test" \
-e "path.repo=/tmp" \
-e "repositories.url.allowed_urls=http://snapshot.*" \
-e "discovery.type=single-node" \
-p 9200:9200 \
--network=elastic \
--name=elasticsearch \
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
fi

@@ -851,3 +851,87 @@ test('Elastic cloud config', t => {
t.end()
})
test('Opaque Id support', t => {
t.test('No opaqueId', t => {
t.plan(3)
function handler (req, res) {
t.strictEqual(req.headers['x-opaque-id'], undefined)
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}
buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://localhost:${port}`
})
client.search({
index: 'test',
q: 'foo:bar'
}, (err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
})
})
})
t.test('No prefix', t => {
t.plan(3)
function handler (req, res) {
t.strictEqual(req.headers['x-opaque-id'], 'bar')
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}
buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://localhost:${port}`
})
client.search({
index: 'test',
q: 'foo:bar'
}, {
opaqueId: 'bar'
}, (err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
})
})
})
t.test('With prefix', t => {
t.plan(3)
function handler (req, res) {
t.strictEqual(req.headers['x-opaque-id'], 'foo-bar')
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}
buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://localhost:${port}`,
opaqueIdPrefix: 'foo-'
})
client.search({
index: 'test',
q: 'foo:bar'
}, {
opaqueId: 'bar'
}, (err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
})
})
})
t.end()
})

@@ -474,6 +474,45 @@ test('API', t => {
t.end()
})
t.test('Should map roles', t => {
const pool = new ConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: 'example.com:9200'
},
roles: ['master', 'data', 'ingest', 'ml']
},
a2: {
http: {
publish_address: 'example.com:9201'
},
roles: []
}
}
t.same(pool.nodesToHost(nodes, 'http:'), [{
url: new URL('http://example.com:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true,
ml: true
}
}, {
url: new URL('http://example.com:9201'),
id: 'a2',
roles: {
master: false,
data: false,
ingest: false,
ml: false
}
}])
t.end()
})
t.end()
})