Compare commits
80 Commits
| Author | SHA1 | Date |
|---|---|---|
| | ca457acb5a | |
| | fb1c08010a | |
| | dc8a30be00 | |
| | 5e26ff1a9b | |
| | f58d520883 | |
| | 231cfade24 | |
| | d84cb382f7 | |
| | 9b400afbc8 | |
| | 852ffcafb7 | |
| | 5d9bfd6492 | |
| | d43884cc5c | |
| | f53b82f7d5 | |
| | be2a7ab544 | |
| | 6ff1db20c4 | |
| | a9e68110b5 | |
| | 30360ce8c8 | |
| | c386190c93 | |
| | 6839df018a | |
| | 4c9b2630cf | |
| | 18d8f795a0 | |
| | 4830b9f8e5 | |
| | e6fb667442 | |
| | 56bd7f3e72 | |
| | d105b0c9fe | |
| | e6ea9a9911 | |
| | e7a22a9290 | |
| | 8190bf5ad8 | |
| | eb31fa6003 | |
| | 9a388118fd | |
| | 1a2476abb3 | |
| | 0c823a2893 | |
| | 9afca3dc8e | |
| | f8aa163f87 | |
| | b390193dcc | |
| | df76154388 | |
| | 3e2a823a72 | |
| | 8a1bb2665e | |
| | 3acee8b266 | |
| | 60a1f298d7 | |
| | e7c40dd459 | |
| | 86361daea5 | |
| | 09c2ff8bf3 | |
| | 75d85cae19 | |
| | e30079f64d | |
| | 610bf851d2 | |
| | b91ff8a6cc | |
| | b39f29b2b0 | |
| | ef69bbd216 | |
| | 6194119e62 | |
| | a4bff1c2f0 | |
| | 8e86450aeb | |
| | 90be646658 | |
| | af8bef4892 | |
| | 2504563480 | |
| | 1e55d2a099 | |
| | 23fbc15e1a | |
| | 5b12e8a123 | |
| | c65119bd53 | |
| | 708f9abe3e | |
| | a55856c60b | |
| | 126c277496 | |
| | 23341d5f2a | |
| | c3993f9145 | |
| | c7dd19bfcf | |
| | a018d49442 | |
| | f2a0abf76a | |
| | 85731864bf | |
| | f6abf6e093 | |
| | d7fc4f7b03 | |
| | a9c054ff21 | |
| | 1810f5f0c3 | |
| | edfa527d2d | |
| | 05921b134d | |
| | aadf44bbf2 | |
| | bef1604a8d | |
| | 87fb0a2996 | |
| | 456fc19c94 | |
| | 4c118b9f9a | |
| | dc5c041b07 | |
| | 6774560444 | |
@@ -1,20 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDSTCCAjGgAwIBAgIUIwN+0zglsexRKwE1RGHvlCcmrdwwDQYJKoZIhvcNAQEL
BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
cmF0ZWQgQ0EwHhcNMTkwMjEzMDcyMjQwWhcNMjIwMjEyMDcyMjQwWjA0MTIwMAYD
VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANILs0JO0e7x29zeVx21qalK
XKdX+AMlGJPH75wWO/Jq6YHtxt1wYIg762krOBXfG6JsFSOIwIv5VrzGGRGjSPt9
OXQyXrDDiQvsBT3rpzLNdDs7KMl2tZswwv7w9ujgud0cYnS1MOpn81rfPc73DvMg
xuhplofDx6fn3++PjVRU2FNiIVWyEoaxRjCeGPMBubKZYaYbQA6vYM4Z+ByG727B
AyAER3t7xmvYti/EoO2hv2HQk5zgcj/Oq3AJKhnt8LH8fnfm3TnYNM1htvXqhN05
vsvhvm2PHfnA5qLlSr/3W0aI/U/PqfsFDCgyRV097sMIaKkmavb0Ue7aQ7lgtp0C
AwEAAaNTMFEwHQYDVR0OBBYEFDRKlCMowWR1rwxE0d1lTEQe5O71MB8GA1UdIwQY
MBaAFDRKlCMowWR1rwxE0d1lTEQe5O71MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
hvcNAQELBQADggEBAKbCJ95EBpeuvF70KEt6QU70k/SH1NRvM9YzKryV0D975Jvu
HOSm9HgSTULeAUFZIa4oYyf3QUfVoI+2T/aQrfXA3gfrJWsHURkyNmiHOFAbYHqi
xA6i249G2GTEjc1+le/M2N2CcDKAmurW6vSGK4upXQbPd6KmnhHREX74zkWjnOa+
+tibbSSOCT4Tmja2DbBxAPuivU9IB1g/hIUmbYQqKffQrBJA0658tz6w63a/Q7xN
pCvvbSgiMZ6qcVIcJkBT2IooYie+ax45pQECHthgIUcQAzfmIfqlU0Qfl8rDgAmn
0c1o6HQjKGU2aVGgSRuaaiHaSZjbPIZVS51sOoI=
MIIDSjCCAjKgAwIBAgIVAJQLm8V2LcaCTHUcoIfO+KL63nG3MA0GCSqGSIb3DQEB
CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu
ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1N1oXDTIzMDIyNTA1NTA1N1owNDEyMDAG
A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYyajkPvGtUOE5M1OowQfB
kWVrWjo1+LIxzgCeRHp0YztLtdVJ0sk2xoSrt2uZpxcPepdyOseLTjFJex1D2yCR
AEniIqcFif4G72nDih2LlbhpUe/+/MTryj8ZTkFTzI+eMmbQi5FFMaH+kwufmdt/
5/w8YazO18SxxJUlzMqzfNUrhM8vvvVdxgboU7PWhk28wZHCMHQovomHmzclhRpF
N0FMktA98vHHeRjH19P7rNhifSd7hZzoH3H148HVAKoPgqnZ6vW2O2YfAWOP6ulq
cyszr57p8fS9B2wSdlWW7nVHU1JuKcYD67CxbBS23BeGFgCj4tiNrmxO8S5Yf85v
AgMBAAGjUzBRMB0GA1UdDgQWBBSWAlip9eoPmnG4p4OFZeOUBlAbNDAfBgNVHSME
GDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
SIb3DQEBCwUAA4IBAQA19qqrMTWl7YyId+LR/QIHDrP4jfxmrEELrAL58q5Epc1k
XxZLzOBSXoBfBrPdv+3XklWqXrZjKWfdkux0Xmjnl4qul+srrZDLJVZG3I7IrITh
AmQUmL9MuPiMnAcxoGZp1xpijtW8Qmd2qnambbljWfkuVaa4hcVRfrAX6TciIQ21
bS5aeLGrPqR14h30YzDp0RMmTujEa1o6ExN0+RSTkE9m89Q6WdM69az8JW7YkWqm
I+UCG3TcLd3TXmN1zNQkq4y2ObDK4Sxy/2p6yFPI1Fds5w/zLfBOvvPQY61vEqs8
SCCcQIe7f6NDpIRIBlty1C9IaEHj7edyHjF6rtYb
-----END CERTIFICATE-----
27 .ci/certs/ca.key Normal file
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEA2Mmo5D7xrVDhOTNTqMEHwZFla1o6NfiyMc4AnkR6dGM7S7XV
SdLJNsaEq7drmacXD3qXcjrHi04xSXsdQ9sgkQBJ4iKnBYn+Bu9pw4odi5W4aVHv
/vzE68o/GU5BU8yPnjJm0IuRRTGh/pMLn5nbf+f8PGGsztfEscSVJczKs3zVK4TP
L771XcYG6FOz1oZNvMGRwjB0KL6Jh5s3JYUaRTdBTJLQPfLxx3kYx9fT+6zYYn0n
e4Wc6B9x9ePB1QCqD4Kp2er1tjtmHwFjj+rpanMrM6+e6fH0vQdsEnZVlu51R1NS
binGA+uwsWwUttwXhhYAo+LYja5sTvEuWH/ObwIDAQABAoIBAQC8QDGnMnmPdWJ+
13FYY3cmwel+FXXjFDk5QpgK15A2rUz6a8XxO1d7d1wR+U84uH4v9Na6XQyWjaoD
EyPQnuJiyAtgkZLUHoY244PGR5NsePEQlBSCKmGeF5w/j1LvP/2e9EmP4wKdQYJY
nLxFNcgEBCFnFbKIU5n8fKa/klybCrwlBokenyBro02tqH4LL7h1YMRRrl97fv1V
e/y/0WcMN+KnMglfz6haimBRV2yamCCHHmBImC+wzOgT/quqlxPfI+a3ScHxuA65
3QyCavaqlPh+T3lXnN/Na4UWqFtzMmwgJX2x1zM5qiln46/JoDiXtagvV43L3rNs
LhPRFeIRAoGBAPhEB7nNpEDNjIRUL6WpebWS9brKAVY7gYn7YQrKGhhCyftyaiBZ
zYgxPaJdqYXf+DmkWlANGoYiwEs40QwkR/FZrvO4+Xh3n3dgtl59ZmieuoQvDsG+
RYIj+TfBaqhewhZNMMl7dxz7DeyQhyRCdsvl3VqJM0RuOsIrzrhCIEItAoGBAN+K
lgWI7swDpOEaLmu+IWMkGImh1LswXoZqIgi/ywZ7htZjPzidOIeUsMi+lrYsKojG
uU3sBxASsf9kYXDnuUuUbGT5M/N2ipXERt7klUAA/f5sg1IKlTrabaN/HGs/uNtf
Efa8v/h2VyTurdPCJ17TNpbOMDwX1qGM62tyt2CLAoGBAIHCnP8iWq18QeuQTO8b
a3/Z9hHRL22w4H4MI6aOB6GSlxuTq6CJD4IVqo9IwSg17fnCy2l3z9s4IqWuZqUf
+XJOW8ELd2jdrT2qEOfGR1Z7UCVyqxXcq1vgDYx0zZh/HpalddB5dcJx/c8do2Ty
UEE2PcHqYB9uNcvzNbLc7RtpAoGBALbuU0yePUTI6qGnajuTcQEPpeDjhRHWSFRZ
ABcG1N8uMS66Mx9iUcNp462zgeP8iqY5caUZtMHreqxT+gWKK7F0+as7386pwElF
QPXgO18QMMqHBIQb0vlBjJ1SRPBjSiSDTVEML1DljvTTOX7kEJHh6HdKrmBO5b54
cqMQUo53AoGBAPVWRPUXCqlBz914xKna0ZUh2aesRBg5BvOoq9ey9c52EIU5PXL5
0Isk8sWSsvhl3tjDPBH5WuL5piKgnCTqkVbEHmWu9s1T57Mw6NuxlPMLBWvyv4c6
tB9brOxv0ui3qGMuBsBoDKbkNnwXyOXLyFg7O+H4l016A3mLQzJM+NGV
-----END RSA PRIVATE KEY-----
@@ -1,19 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDIjCCAgqgAwIBAgIUI4QU6jA1dYSCbdIA6oAb2TBEluowDQYJKoZIhvcNAQEL
BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
cmF0ZWQgQ0EwHhcNMTkwMjEzMDcyMzEzWhcNMjIwMjEyMDcyMzEzWjATMREwDwYD
VQQDEwhpbnN0YW5jZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJeT
yOy6EAScZxrULKjHePciiz38grivCrhFFV+dThaRCcl3DhDzb9Eny5q5iEw3WvLQ
Rqmf01jncNIhaocTt66VqveXaMubbE8O0LcG6e4kpFO+JtnVF8JTARTc+ux/1uD6
hO1VG/HItM7WQrQxh4hfB2u1AX2YQtoqEtXXEC+UHWfl4QzuzXjBnKCkO/L9/6Tf
yNFQWXxKnIiTs8Xm9sEhhSCBJPlLTQu+MX4vR2Uwj5XZmflDUr+ZTenl9qYxL6b3
SWhh/qEl4GAj1+tS7ZZOxE0237mUh3IIFYSWSaMm8K2m/BYHkLNWL5B1dMic0lsv
osSoYrQuCef4HQMCitsCAwEAAaNNMEswHQYDVR0OBBYEFFMg4l1GLW8lYbwASY+r
YeWYRzIiMB8GA1UdIwQYMBaAFDRKlCMowWR1rwxE0d1lTEQe5O71MAkGA1UdEwQC
MAAwDQYJKoZIhvcNAQELBQADggEBAEQrgh1xALpumQTzsjxFRGque/vlKTgRs5Kh
xtgapr6wjIbdq7dagee+4yNOKzS5lGVXCgwrJlHESv9qY0uumT/33vK2uduJ7NAd
fR2ZzyBnhMX+mkYhmGrGYCTUMUIwOIQYa4Evis4W+LHmCIDG03l7gLHfdIBe9VMO
pDZum8f6ng0MM49s8/rXODNYKw8kFyUhnfChqMi/2yggb1uUIfKlJJIchkgYjE13
zuC+fjo029Pq1jeMIdxugLf/7I/8NiW1Yj9aCXevUXG1qzHFEuKAinBXYOZO/vWS
LaEqOhwrzNynwgGpYAr7Rfgv4AflltYIIav4PZT03P7fbyAAf8s=
MIIDIzCCAgugAwIBAgIVAMTO6uVx9dLox2t0lY4IcBKZXb5WMA0GCSqGSIb3DQEB
CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu
ZXJhdGVkIENBMB4XDTIwMDIyNjA1NTA1OVoXDTIzMDIyNTA1NTA1OVowEzERMA8G
A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDK
YLTOikVENiN/qYupOsoXd7VYYnryyfCC/dK4FC2aozkbqjFzBdvPGAasoc4yEiH5
CGeXMgJuOjk1maqetmdIsw00j4oHJviYsnGXzxxS5swhD7spcW4Uk4V4tAUzrbfT
vW/2WW/yYCLe5phVb2chz0jL+WYb4bBmdfs/t6RtP9RqsplYAmVp3gZ6lt2YNtvE
k9gz0TVk3DuO1TquIClfRYUjuywS6xDSvxJ8Jl91EfDWM8QU+9F+YAtiv74xl2U3
P0wwMqNvMxf9/3ak3lTQGsgO4L6cwbKpVLMMzxSVunZz/sgl19xy3qHHz1Qr2MjJ
/2c2J7vahUL4NPRkjJClAgMBAAGjTTBLMB0GA1UdDgQWBBS2Wn8E2VZv4oenY+pR
O8G3zfQXhzAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAJBgNVHRME
AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAvwPvCiJJ6v9jYcyvYY8I3gP0oCwrylpRL
n91UlgRSHUmuAObyOoVN5518gSV/bTU2SDrstcLkLFxHvnfpoGJoxsQEHuGxwDRI
nhYNd62EKLerehNM/F9ILKmvTh8f6QPCzjUuExTXv+63l2Sr6dBS7FHsGs6UKUYO
llM/y9wMZ1LCuZuBg9RhtgpFXRSgDM9Z7Begu0d/BPX9od/qAeZg9Arz4rwUiCN4
IJOMEBEPi5q1tgeS0Fb1Grpqd0Uz5tZKtEHNKzLG+zSMmkneL62Nk2HsmEFZKwzg
u2pU42UaUE596G6o78s1aLn9ICcElPHTjiuZNSiyuu9IzvFDjGQw
-----END CERTIFICATE-----
@@ -1,27 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAl5PI7LoQBJxnGtQsqMd49yKLPfyCuK8KuEUVX51OFpEJyXcO
EPNv0SfLmrmITDda8tBGqZ/TWOdw0iFqhxO3rpWq95doy5tsTw7Qtwbp7iSkU74m
2dUXwlMBFNz67H/W4PqE7VUb8ci0ztZCtDGHiF8Ha7UBfZhC2ioS1dcQL5QdZ+Xh
DO7NeMGcoKQ78v3/pN/I0VBZfEqciJOzxeb2wSGFIIEk+UtNC74xfi9HZTCPldmZ
+UNSv5lN6eX2pjEvpvdJaGH+oSXgYCPX61Ltlk7ETTbfuZSHcggVhJZJoybwrab8
FgeQs1YvkHV0yJzSWy+ixKhitC4J5/gdAwKK2wIDAQABAoIBAQCRFTJna/xy/WUu
59FLR4qAOj8++JgCwACpue4oU7/vl6nffSYokWoAr2+RzG4qTX2vFi3cpA8+dGCn
sLZvTi8tWzKGxBTZdg2oakzaMzLr74SeZ052iCGyrZJGbvF6Ny7srr1XEXSq6+os
ZCb6pMHOhO7saBdiKMAsY8MdjTl/33AduuE6ztqv+L92xTr2g4QlbT1KvWlEgppU
k4Gy7zdETkPBTSH/17ZwyGJoJICIAhbL4IpmOM4dPIg8nFkVPPpy6p0z4uGjtgnK
nreZ2EKMzCafBaHn7A77gpi0OrQdl6pe0fsGqv/323YjCJPbwwl5TsoNq44DzwiX
3M7XiVJxAoGBAOCne56vdN4uZmCgLVGT2JSUNVPOu4bfjrxWH6cslzrPT2Zhp3lO
M4axZ3gmcervV252YEZXntXDHHCSfrECllRN1WFD63XmyQ/CkhuvZkkeRHfzL1TE
EdqHOTqs4sRETZ7+RITFC81DZQkWWOKeyXMjyPBqd7RnThQHijB1c8Y5AoGBAKy6
CVKBx+zz5crVD0tz4UhOmz1wRNN0CL0l+FXRuFSgbzMIvwpfiqe25crgeLHe2M2/
TogdWbjZ2nUZQTzoRsSkQ6cKHpj+G/gWurp/UcHHXFVwgLSPF7c3KHDtiYq7Vqw0
bvmhM03LI6+ZIPRV7hLBr7WP7UmpAiREMF7tTnmzAoGBAIkx3w3WywFQxtblmyeB
qbd7F2IaE23XoxyjX+tBEQ4qQqwcoSE0v8TXHIBEwjceeX+NLVhn9ClJYVniLRq+
oL3VVqVyzB4RleJZCc98e3PV1yyFx/b1Uo3pHOsXX9lKeTjKwV9v0rhFGzPEgP3M
yOvXA8TG0FnM6OLUg/D6GX0JAoGAMuHS4TVOGeV3ahr9mHKYiN5vKNgrzka+VEod
L9rJ/FQOrfADpyCiDen5I5ygsXU+VM3oanyK88NpcVlxOGoMft0M+OYoQVWKE7lO
ZKYhBX6fGqQ7pfUJPXXIOgwfmni5fZ0sm+j63g3bg10OsiumKGxaQJgXhL1+3gQg
Y7ZwibUCgYEAlZoFFvkMLjpOSaHk1z5ZZnt19X0QUIultBwkumSqMPm+Ks7+uDrx
thGUCoz4ecr/ci4bIUY7mB+zfAbqnBOMxreJqCRbAIuRypo1IlWkTp8DywoDOfMW
NfzjVmzJ7EJu44nGmVAi1jw4Pbseivvi1ujMCoPgaE8I1uSh144bwN8=
MIIEogIBAAKCAQEAymC0zopFRDYjf6mLqTrKF3e1WGJ68snwgv3SuBQtmqM5G6ox
cwXbzxgGrKHOMhIh+QhnlzICbjo5NZmqnrZnSLMNNI+KByb4mLJxl88cUubMIQ+7
KXFuFJOFeLQFM623071v9llv8mAi3uaYVW9nIc9Iy/lmG+GwZnX7P7ekbT/UarKZ
WAJlad4GepbdmDbbxJPYM9E1ZNw7jtU6riApX0WFI7ssEusQ0r8SfCZfdRHw1jPE
FPvRfmALYr++MZdlNz9MMDKjbzMX/f92pN5U0BrIDuC+nMGyqVSzDM8Ulbp2c/7I
Jdfcct6hx89UK9jIyf9nNie72oVC+DT0ZIyQpQIDAQABAoIBADAh7f7NjgnaInlD
ds8KB3SraPsbeQhzlPtiqRJU4j/MIFH/GYG03AGWQkget67a9y+GmzSvlTpoKKEh
6h2TXl9BDpv4o6ht0WRn1HJ5tM/Wyqf2WNpTew3zxCPgFPikkXsPrChYPzLTQJfp
GkP/mfTFmxfAOlPZSp4j41zVLYs53eDkAegFPVfKSr1XNNJ3QODLPcIBfxBYsiC9
oU+jRW8xYuj31cEl5k5UqrChJ1rm3mt6cguqXKbISuoSvi13gXI6DccqhuLAU+Kr
ib2XYrRP+pWocZo/pM9WUVoNGtFxfY88sAQtvG6gDKo2AURtFyq84Ow0h9mdixV/
gRIDPcECgYEA5nEqE3OKuG9WuUFGXvjtn4C0F6JjflYWh7AbX51S4F6LKrW6/XHL
Rg4BtF+XReT7OQ6llsV8kZeUxsUckkgDLzSaA8lysNDV5KkhAWHfRqH//QKFbqZi
JL9t3x63Qt81US8s2hQk3khPYTRM8ZB3xHiXvZYSGC/0x/DxfEO3QJECgYEA4NK5
sxtrat8sFz6SK9nWEKimPjDVzxJ0hxdX4tRq/JdOO5RncawVqt6TNP9gTuxfBvhW
MhJYEsQj8iUoL1dxo9d1eP8HEANNV0iX5OBvJNmgBp+2OyRSyr+PA55+wAxYuAE7
QKaitOjW57fpArNRt2hQyiSzTuqUFRWTWJHCWNUCgYAEurPTXF6vdFGCUc2g61jt
GhYYGhQSpq+lrz6Qksj9o9MVWE9zHh++21C7o+6V16I0RJGva3QoBMVf4vG4KtQt
5tV2WG8LI+4P2Ey+G4UajP6U8bVNVQrUmD0oBBhcvfn5JY+1Fg6/pRpD82/U0VMz
7AmpMWhDqNBMPiymkTk0kQKBgCuWb05cSI0ly4SOKwS5bRk5uVFhYnKNH255hh6C
FGP4acB/WzbcqC7CjEPAJ0nl5d6SExQOHmk1AcsWjR3wlCWxxiK5PwNJwJrlhh1n
reS1FKN0H36D4lFQpkeLWQOe4Sx7gKNeKzlr0w6Fx3Uwku0+Gju2tdTdAey8jB6l
08opAoGAEe1AuR/OFp2xw6V8TH9UHkkpGxy+OrXI6PX6tgk29PgB+uiMu4RwbjVz
1di1KKq2XecAilVbnyqY+edADxYGbSnci9x5wQRIebfMi3VXKtV8NQBv2as6qwtW
JDcQUWotOHjpdvmfJWWkcBhbAKrgX8ukww00ZI/lC3/rmkGnBBg=
-----END RSA PRIVATE KEY-----
20 .ci/certs/testnode_san.crt Normal file
@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDVjCCAj6gAwIBAgIULh42yRefYlRRl1hvt055LrUH0HwwDQYJKoZIhvcNAQEL
BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l
cmF0ZWQgQ0EwHhcNMjAwMjI4MDMzNzIwWhcNMjMwMjI3MDMzNzIwWjATMREwDwYD
VQQDEwhpbnN0YW5jZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIUP
t267NN21z+3ukajej8eojSXwP6zHxy7CUAp+sQ7bTq2XCKxkYX3CW9ThcS4cV9mL
ayYdWEYnbEDGYPQDo7Wk3Ih5OEXTMZb/yNEx5D4S2lGMOS5bCDdYx6GvwCMG4jNx
aMktosaxpprAJiHh2oLgQk0hQc/a9JfMo6kJKtuhjxsxjxLwcOHhuaUD7NS0Pjop
CJkSYcrL+nnQPQjKe4uLhAbSyiX914h4QX0CJ0e4z1ccdDX2PFWTrwaIf//vQhCR
wP2YKdfjR0JB4oDAlu85GsIs2cFLPysM5ufuNZO4fCr8uOwloKI8zZ2HhlIfBEcY
Gcy4g9N/9epmxMXZlGcCAwEAAaOBgDB+MB0GA1UdDgQWBBRefYm8DHHDdkTPHhS1
HEUwTb2uiDAfBgNVHSMEGDAWgBSWAlip9eoPmnG4p4OFZeOUBlAbNDAxBgNVHREE
KjAogglsb2NhbGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAGCA2VzMTAJBgNV
HRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQC+pauqM2wJjQaHyHu+kIm59P4b/5Oj
IH1cYCQfMB7Y2UMLxp0ew+f7o7zzE2DA52YYFDWy6J5DVWtSBPyeFGgX+RH+aA+9
Iv4cc9QpAs6aFjncorHrzNOrWLgCHIeRAxTR0CAkeP2dUZfDBuMpRyP6rAsYzyLH
Rb3/BfYJSI5vxgt5Ke49Y/ljDKFJTyDmAVrHQ4JWrseYE1UZ2eDkBXeiRlYE/QtB
YsrUSqdL6zvFZyUcilxDUUabNcA+GgeGZ2lAEA90F8vwi62QwRXo3Iv1Hz+6xc43
nFofDK9D8/qkrUD9iuhpx1974QwPhwWyjn9RZRpbZA4ngRL+szdRXR4N
-----END CERTIFICATE-----
27 .ci/certs/testnode_san.key Normal file
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAhQ+3brs03bXP7e6RqN6Px6iNJfA/rMfHLsJQCn6xDttOrZcI
rGRhfcJb1OFxLhxX2YtrJh1YRidsQMZg9AOjtaTciHk4RdMxlv/I0THkPhLaUYw5
LlsIN1jHoa/AIwbiM3FoyS2ixrGmmsAmIeHaguBCTSFBz9r0l8yjqQkq26GPGzGP
EvBw4eG5pQPs1LQ+OikImRJhysv6edA9CMp7i4uEBtLKJf3XiHhBfQInR7jPVxx0
NfY8VZOvBoh//+9CEJHA/Zgp1+NHQkHigMCW7zkawizZwUs/Kwzm5+41k7h8Kvy4
7CWgojzNnYeGUh8ERxgZzLiD03/16mbExdmUZwIDAQABAoIBAEwhjulLMVc9JEfV
PP/qv0cUOBYh3LzF3T/yq4slq7Z9YgnOJYdFM8aZgqNNjc09KEJvE5JOLeiNu9Ff
768Nugg+2HM5MCo7SN9FYCfZLOcbMFCCM2FDcnMAV9A512vzD08xryuT8dNPZ6yZ
DfhK2hQRrb2lrpr3gwSrcGRRu3THqvq7X1RIjpLV3teDMeP8rQPAlpj8fmP+kdVV
5y1ihiDIo87McihG9FMavJtBDXQkUEuVw6eIeir8L/zHHD/ZwhYjNHZGWbrB88sz
CkJkfWh/FlA63tCVdJzkmnERALLTVy9mR0Sq6sUlnFhFNO2BRdWgYLrcp9McfTJC
e8+WsSECgYEAuwQ3nAaFL0jqYu1AREyKT/f3WUenf2UsX7dwwV2/yFtQvkzW7ji4
uZLnfUnZBojtHf35dRo+hDgtvhZhgZNAuPPsbOl/EIMTcbChEqV/3CSTFlhLFM1d
hfM9PoM+Bt/pyUNabjD1sWM0X7WeUhzcddshY3S4daBsNsLuOzweRRcCgYEAtiSS
4qiiGafYsY7gOHuAlOhs/00+1uWIFEHKgoHM9vzCxDN3LCmBdynHk8ZE2TAdhw+l
7xpu6LUxKQDfGmVZa9Epg0kQmVq9c54oQP57pJ3tR+68++insEkfnaZH8jblfq2s
sSkFrY3pdS19edq60nuft64kswKRUUkamCXTXTECgYBdoSfiMpV9bekC7DsPtq5M
iR3KEgi2zEViCmomNTRuL+GF1NyKWdWJ+xVwcYd5MRZdvKimyyPfeGzWTUg14i42
KtEEWgZmkukqMz8BIeCYq6sENeIpIQQgqv3PjU+Bi5r1S4Y7wsFPNRakkD4aaB6r
1rCppWcwZMeoxwEUoO2aswKBgBdDIIdWJi3EpAY5SyWrkEZ0UMdiZC4p7nE33ddB
IJ5CtdU9BXFcc652ZYjX/58FaCABvZ2F8LhDu92SwOusGfmNIxIjWL1dO2jywA1c
8wmZKd7P/M7nbdMz45fMzs9+d1zwbWfK53C8+R4AC1BuwQF0zHc3BHTgVRLelUjt
O8thAoGAdO2gHIqEsZzTgbvLbsh52eVbumjfNGnrnEv1fjb+o+/wAol8dymcmzbL
bZCRzoyA0qwU9kdPFgX46H6so6o1tUM2GQtVFoT6kDnPv7EkLQK0C4cDh6OOHxDU
NPvr/9fHhQd9EDWDvS1JnVMAdKDO6ELp3SoKGGmCXR2QplnqWAk=
-----END RSA PRIVATE KEY-----
7 .ci/docker/Dockerfile Normal file
@@ -0,0 +1,7 @@
ARG NODE_JS_VERSION=10
FROM node:${NODE_JS_VERSION}-alpine

RUN apk --no-cache add git

# Create app directory
WORKDIR /usr/src/app
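For context, a hedged sketch of how this image might be built and exercised locally; the tag and the Node.js version are illustrative, not taken from the diff:

```bash
# Build the client image, overriding the default Node.js version declared by ARG
docker build \
  --file .ci/docker/Dockerfile \
  --build-arg NODE_JS_VERSION=12 \
  --tag elastic/elasticsearch-js .

# Open a shell in the image, mounting the repository at the WORKDIR set above
docker run --rm -it --volume "$PWD:/usr/src/app" elastic/elasticsearch-js sh
```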
67 .ci/functions/cleanup.sh Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/env bash
#
# Shared cleanup routines between different steps
#
# Please source .ci/functions/imports.sh as a whole not just this file
#
# Version 1.0.0
# - Initial version after refactor

function cleanup_volume {
  if [[ "$(docker volume ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
    (docker volume rm "$1") || true
  fi
}
function container_running {
  if [[ "$(docker ps -q -f name=$1)" ]]; then
    return 0;
  else
    return 1;
  fi
}
function cleanup_node {
  if container_running "$1"; then
    echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m"
    (docker container rm --force --volumes "$1") || true
  fi
  if [[ -n "$1" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1-${suffix}-data\033[0m"
    cleanup_volume "$1-${suffix}-data"
  fi
}
function cleanup_network {
  if [[ "$(docker network ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m"
    (docker network rm "$1") || true
  fi
}

function cleanup_trap {
  status=$?
  set +x
  if [[ "$DETACH" != "true" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m"
    cleanup_all_in_network "$1"
  fi
  # status is 0 or SIGINT
  if [[ "$status" == "0" || "$status" == "130" ]]; then
    echo -e "\n\033[32;1mSUCCESS run-tests\033[0m"
    exit 0
  else
    echo -e "\n\033[31;1mFAILURE during run-tests\033[0m"
    exit ${status}
  fi
};
function cleanup_all_in_network {
  if [[ -z "$(docker network ls -q -f name="^$1\$")" ]]; then
    echo -e "\033[34;1mINFO:\033[0m $1 is already deleted\033[0m"
    return 0
  fi
  containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' $1)
  while read -r container; do
    cleanup_node "$container"
  done <<< "$containers"
  cleanup_network $1
  echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m"
};
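A hedged sketch of how another CI step might consume these helpers; as the file header above asks, imports.sh is sourced as a whole (the version value is illustrative):

```bash
#!/usr/bin/env bash
# imports.sh also sources cleanup.sh and wait-for-container.sh,
# and exports $network_name and $suffix used by the helpers.
export STACK_VERSION=7.6.0   # required by imports.sh
source .ci/functions/imports.sh

# Remove every container attached to the test network, their data volumes,
# and finally the network itself.
cleanup_all_in_network "$network_name"
```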
60 .ci/functions/imports.sh Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/env bash
#
# Sets up all the common variables and imports relevant functions
#
# Version 1.0.1
# - Initial version after refactor
# - Validate STACK_VERSION asap

function require_stack_version() {
  if [[ -z $STACK_VERSION ]]; then
    echo -e "\033[31;1mERROR:\033[0m Required environment variable [STACK_VERSION] not set\033[0m"
    exit 1
  fi
}

require_stack_version

if [[ -z $es_node_name ]]; then
  # only set these once
  set -euo pipefail
  export TEST_SUITE=${TEST_SUITE-oss}
  export RUNSCRIPTS=${RUNSCRIPTS-}
  export DETACH=${DETACH-false}
  export CLEANUP=${CLEANUP-false}

  export es_node_name=instance
  export elastic_password=changeme
  export elasticsearch_image=elasticsearch
  export elasticsearch_url=https://elastic:${elastic_password}@${es_node_name}:9200
  if [[ $TEST_SUITE != "xpack" ]]; then
    export elasticsearch_image=elasticsearch-${TEST_SUITE}
    export elasticsearch_url=http://${es_node_name}:9200
  fi
  export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost}
  export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}"

  export suffix=rest-test
  export moniker=$(echo "$elasticsearch_container" | tr -C "[:alnum:]" '-')
  export network_name=${moniker}${suffix}

  export ssl_cert="${script_path}/certs/testnode.crt"
  export ssl_key="${script_path}/certs/testnode.key"
  export ssl_ca="${script_path}/certs/ca.crt"

fi

export script_path=$(dirname $(realpath -s $0))
source $script_path/functions/cleanup.sh
source $script_path/functions/wait-for-container.sh
trap "cleanup_trap ${network_name}" EXIT


if [[ "$CLEANUP" == "true" ]]; then
  cleanup_all_in_network $network_name
  exit 0
fi

echo -e "\033[34;1mINFO:\033[0m Creating network $network_name if it does not exist already \033[0m"
docker network inspect "$network_name" > /dev/null 2>&1 || docker network create "$network_name"
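A hedged sketch of the environment a caller provides before sourcing this file (the version number is illustrative):

```bash
# STACK_VERSION is mandatory; imports.sh exits immediately without it
export STACK_VERSION=7.6.0
# TEST_SUITE defaults to "oss"; only "xpack" keeps the security-enabled image
export TEST_SUITE=xpack

source .ci/functions/imports.sh
echo "$elasticsearch_container"       # elasticsearch:7.6.0
echo "$external_elasticsearch_url"    # https://elastic:changeme@localhost:9200
```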
36 .ci/functions/wait-for-container.sh Normal file
@@ -0,0 +1,36 @@
#!/usr/bin/env bash
#
# Exposes a routine scripts can call to wait for a container if that container set up a health command
#
# Please source .ci/functions/imports.sh as a whole not just this file
#
# Version 1.0.1
# - Initial version after refactor
# - Make sure wait_for_container is silent

function wait_for_container {
  set +x
  until ! container_running "$1" || (container_running "$1" && [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "starting" ]]); do
    echo ""
    docker inspect -f "{{range .State.Health.Log}}{{.Output}}{{end}}" ${1}
    echo -e "\033[34;1mINFO:\033[0m waiting for node $1 to be up\033[0m"
    sleep 2;
  done;

  # Always show logs if the container is running, this is very useful both on CI as well as while developing
  if container_running $1; then
    docker logs $1
  fi

  if ! container_running $1 || [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "healthy" ]]; then
    cleanup_all_in_network $2
    echo
    echo -e "\033[31;1mERROR:\033[0m Failed to start $1 in detached mode beyond health checks\033[0m"
    echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m"
    return 1
  else
    echo
    echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${1} on docker network: ${network_name}\033[0m"
    return 0
  fi
}
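A hedged usage sketch; both arguments come from variables that imports.sh exports:

```bash
# Assuming .ci/functions/imports.sh has already been sourced:
# poll the container's Docker health check until it leaves "starting";
# on anything other than "healthy" the function tears down the whole
# network ($2) and returns non-zero.
wait_for_container "$es_node_name" "$network_name" \
  || echo "node never became healthy"
```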
@@ -1,4 +1,4 @@
---


##### GLOBAL METADATA

@@ -42,11 +42,15 @@
- axis:
    type: yaml
    filename: .ci/test-matrix.yml
    name: ELASTICSEARCH_VERSION
    name: STACK_VERSION
- axis:
    type: yaml
    filename: .ci/test-matrix.yml
    name: NODE_JS_VERSION
- axis:
    type: yaml
    filename: .ci/test-matrix.yml
    name: TEST_SUITE
yaml-strategy:
  exclude-key: exclude
  filename: .ci/test-matrix.yml
@@ -65,3 +69,6 @@
publishers:
  - email:
      recipients: infra-root+build@elastic.co
# - junit:
#     results: "*-junit.xml"
#     allow-empty-results: true
15 .ci/packer_cache.sh Normal file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

source /usr/local/bin/bash_standard_lib.sh

DOCKER_IMAGES="node:14-alpine
node:12-alpine
node:10-alpine
node:8-alpine
"

for di in ${DOCKER_IMAGES}
do
  (retry 2 docker pull "${di}") || echo "Error pulling ${di} Docker image, we continue"
done
175 .ci/run-elasticsearch.sh Executable file
@@ -0,0 +1,175 @@
#!/usr/bin/env bash
#
# Launch one or more Elasticsearch nodes via the Docker image,
# to form a cluster suitable for running the REST API tests.
#
# Export the ELASTICSEARCH_VERSION variable, eg. 'elasticsearch:8.0.0-SNAPSHOT'.

if [[ -z "$ELASTICSEARCH_VERSION" ]]; then
  echo -e "\033[31;1mERROR:\033[0m Required environment variable [ELASTICSEARCH_VERSION] not set\033[0m"
  exit 1
fi

set -euxo pipefail

moniker=$(echo "$ELASTICSEARCH_VERSION" | tr -C "[:alnum:]" '-')
suffix=rest-test

NODE_NAME=${NODE_NAME-${moniker}node1}
MASTER_NODE_NAME=${MASTER_NODE_NAME-${NODE_NAME}}
CLUSTER_NAME=${CLUSTER_NAME-${moniker}${suffix}}
HTTP_PORT=${HTTP_PORT-9200}

ELASTIC_PASSWORD=${ELASTIC_PASSWORD-changeme}
SSL_CERT=${SSL_CERT-"$PWD/certs/testnode.crt"}
SSL_KEY=${SSL_KEY-"$PWD/certs/testnode.key"}
SSL_CA=${SSL_CA-"$PWD/certs/ca.crt"}

DETACH=${DETACH-false}
CLEANUP=${CLEANUP-false}

volume_name=${NODE_NAME}-${suffix}-data
network_default=${moniker}${suffix}
NETWORK_NAME=${NETWORK_NAME-"$network_default"}

set +x

function cleanup_volume {
  if [[ "$(docker volume ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
    (docker volume rm "$1") || true
  fi
}
function cleanup_node {
  if [[ "$(docker ps -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m"
    (docker container rm --force --volumes "$1") || true
    cleanup_volume "$1-${suffix}-data"
  fi
}
function cleanup_network {
  if [[ "$(docker network ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m"
    (docker network rm "$1") || true
  fi
}

function cleanup {
  if [[ "$DETACH" != "true" ]] || [[ "$1" == "1" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the node and volume on startup (1) OR on exit if not detached\033[0m"
    cleanup_node "$NODE_NAME"
  fi
  if [[ "$DETACH" != "true" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m"
    cleanup_network "$NETWORK_NAME"
  fi
};
trap "cleanup 0" EXIT

if [[ "$CLEANUP" == "true" ]]; then
  trap - EXIT
  if [[ -z "$(docker network ls -q -f name=${NETWORK_NAME})" ]]; then
    echo -e "\033[34;1mINFO:\033[0m $NETWORK_NAME is already deleted\033[0m"
    exit 0
  fi
  containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' ${NETWORK_NAME})
  while read -r container; do
    cleanup_node "$container"
  done <<< "$containers"
  cleanup_network "$NETWORK_NAME"
  echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m"
  exit 0
fi

echo -e "\033[34;1mINFO:\033[0m Making sure previous run leftover infrastructure is removed \033[0m"
cleanup 1

echo -e "\033[34;1mINFO:\033[0m Creating network $NETWORK_NAME if it does not exist already \033[0m"
docker network inspect "$NETWORK_NAME" > /dev/null 2>&1 || docker network create "$NETWORK_NAME"

environment=($(cat <<-END
  --env node.name=$NODE_NAME
  --env cluster.name=$CLUSTER_NAME
  --env cluster.routing.allocation.disk.threshold_enabled=false
  --env bootstrap.memory_lock=true
  --env node.attr.testattr=test
  --env path.repo=/tmp
  --env repositories.url.allowed_urls=http://snapshot.test*
END
))

volumes=($(cat <<-END
  --volume $volume_name:/usr/share/elasticsearch/data
END
))

if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
  environment+=($(cat <<-END
    --env ELASTIC_PASSWORD=$ELASTIC_PASSWORD
    --env xpack.license.self_generated.type=trial
    --env xpack.security.enabled=true
    --env xpack.security.http.ssl.enabled=true
    --env xpack.security.http.ssl.verification_mode=certificate
    --env xpack.security.http.ssl.key=certs/testnode.key
    --env xpack.security.http.ssl.certificate=certs/testnode.crt
    --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt
    --env xpack.security.transport.ssl.enabled=true
    --env xpack.security.transport.ssl.key=certs/testnode.key
    --env xpack.security.transport.ssl.certificate=certs/testnode.crt
    --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt
END
))
  volumes+=($(cat <<-END
    --volume $SSL_CERT:/usr/share/elasticsearch/config/certs/testnode.crt
    --volume $SSL_KEY:/usr/share/elasticsearch/config/certs/testnode.key
    --volume $SSL_CA:/usr/share/elasticsearch/config/certs/ca.crt
END
))
fi

url="http://$NODE_NAME"
if [[ "$ELASTICSEARCH_VERSION" != *oss* ]]; then
  url="https://elastic:$ELASTIC_PASSWORD@$NODE_NAME"
fi

echo -e "\033[34;1mINFO:\033[0m Starting container $NODE_NAME \033[0m"
set -x
docker run \
  --name "$NODE_NAME" \
  --network "$NETWORK_NAME" \
  --env ES_JAVA_OPTS=-"Xms1g -Xmx1g" \
  "${environment[@]}" \
  "${volumes[@]}" \
  --publish "$HTTP_PORT":9200 \
  --ulimit nofile=65536:65536 \
  --ulimit memlock=-1:-1 \
  --detach="$DETACH" \
  --health-cmd="curl --silent --insecure --fail $url:9200/_cluster/health || exit 1" \
  --health-interval=2s \
  --health-retries=20 \
  --health-timeout=2s \
  --rm \
  docker.elastic.co/elasticsearch/"$ELASTICSEARCH_VERSION";
set +x

if [[ "$DETACH" == "true" ]]; then
  until [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "starting" ]]; do
    sleep 2;
    echo ""
    echo -e "\033[34;1mINFO:\033[0m waiting for node $NODE_NAME to be up\033[0m"
  done;
  # Always show the node getting started logs, this is very useful both on CI as well as while developing
  docker logs "$NODE_NAME"
  if [[ "$(docker inspect -f "{{.State.Health.Status}}" ${NODE_NAME})" != "healthy" ]]; then
    cleanup 1
    echo
    echo -e "\033[31;1mERROR:\033[0m Failed to start ${ELASTICSEARCH_VERSION} in detached mode beyond health checks\033[0m"
    echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m"
    exit 1
  else
    echo
    echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${NODE_NAME} on docker network: ${NETWORK_NAME}\033[0m"
    echo -e "\033[32;1mSUCCESS:\033[0m Running on: ${url/$NODE_NAME/localhost}:${HTTP_PORT}\033[0m"
    exit 0
  fi
fi
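A hedged sketch of a standalone invocation; the image tag is illustrative and the variables mirror the defaults declared at the top of the script:

```bash
# Start a single security-enabled node in the background and wait for its
# Docker health check to pass.
ELASTICSEARCH_VERSION=elasticsearch:7.6.0 DETACH=true bash .ci/run-elasticsearch.sh

# Later, remove the node, its data volume, and the test network.
ELASTICSEARCH_VERSION=elasticsearch:7.6.0 CLEANUP=true bash .ci/run-elasticsearch.sh
```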
@@ -8,102 +8,47 @@
#
# - $ELASTICSEARCH_VERSION
# - $NODE_JS_VERSION
# - $TEST_SUITE
#

set -eo pipefail

set +x
export VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
export CODECOV_TOKEN=$(vault read -field=token secret/clients-ci/elasticsearch-js/codecov)
unset VAULT_ROLE_ID VAULT_SECRET_ID VAULT_TOKEN
set -x
ELASTICSEARCH_VERSION=${STACK_VERSION}

function cleanup {
  docker container rm --force --volumes elasticsearch-oss > /dev/null 2>&1 || true
  docker container rm --force --volumes elasticsearch-platinum > /dev/null 2>&1 || true
  docker container rm --force --volumes elasticsearch-js-oss > /dev/null 2>&1 || true
  docker container rm --force --volumes elasticsearch-js-platinum > /dev/null 2>&1 || true
  docker network rm esnet-oss > /dev/null
  docker network rm esnet-platinum > /dev/null
}

trap cleanup EXIT

# create network and volume
docker network create esnet-oss
docker network create esnet-platinum

# create client image
docker build \
  --file .ci/Dockerfile \
  --tag elastic/elasticsearch-js \
  --build-arg NODE_JS_VERSION=${NODE_JS_VERSION} \
  .

# run elasticsearch oss
docker run \
  --rm \
  --env "node.attr.testattr=test" \
  --env "path.repo=/tmp" \
  --env "repositories.url.allowed_urls=http://snapshot.*" \
  --env "discovery.type=single-node" \
  --network=esnet-oss \
  --name=elasticsearch-oss \
  --detach \
  docker.elastic.co/elasticsearch/elasticsearch-oss:${ELASTICSEARCH_VERSION}

# run elasticsearch platinum
NODE_NAME="es1"
repo=$(pwd)
testnodecrt="/.ci/certs/testnode.crt"
testnodekey="/.ci/certs/testnode.key"
cacrt="/.ci/certs/ca.crt"

docker run \
  --rm \
  --env "node.attr.testattr=test" \
  --env "path.repo=/tmp" \
  --env "repositories.url.allowed_urls=http://snapshot.*" \
  --env "discovery.type=single-node" \
  --env "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
  --env "ELASTIC_PASSWORD=changeme" \
  --env "xpack.security.enabled=true" \
  --env "xpack.license.self_generated.type=trial" \
  --env "xpack.security.http.ssl.enabled=true" \
  --env "xpack.security.http.ssl.verification_mode=certificate" \
  --env "xpack.security.http.ssl.key=certs/testnode.key" \
  --env "xpack.security.http.ssl.certificate=certs/testnode.crt" \
  --env "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
  --env "xpack.security.transport.ssl.enabled=true" \
  --env "xpack.security.transport.ssl.key=certs/testnode.key" \
  --env "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
  --env "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
  --volume "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
  --volume "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
  --volume "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
  --network=esnet-platinum \
  --name=elasticsearch-platinum \
  --detach \
  docker.elastic.co/elasticsearch/elasticsearch:${ELASTICSEARCH_VERSION}
elasticsearch_image="elasticsearch"
elasticsearch_url="https://elastic:changeme@${NODE_NAME}:9200"
if [[ $TEST_SUITE != "xpack" ]]; then
  elasticsearch_image="elasticsearch-oss"
  elasticsearch_url="http://${NODE_NAME}:9200"
fi

ELASTICSEARCH_VERSION="${elasticsearch_image}:${ELASTICSEARCH_VERSION}" \
NODE_NAME="${NODE_NAME}" \
NETWORK_NAME="esnet" \
DETACH=true \
SSL_CERT="${repo}${testnodecrt}" \
SSL_KEY="${repo}${testnodekey}" \
SSL_CA="${repo}${cacrt}" \
bash .ci/run-elasticsearch.sh

# run the client unit and oss integration test
docker run \
  --network=esnet-oss \
  --env "TEST_ES_SERVER=http://elasticsearch-oss:9200" \
  --env "CODECOV_TOKEN" \
  --network=esnet \
  --env "TEST_ES_SERVER=${elasticsearch_url}" \
  --volume $repo:/usr/src/app \
  --volume /usr/src/app/node_modules \
  --name elasticsearch-js-oss \
  --rm \
  elastic/elasticsearch-js \
  npm run ci

# run the client platinum integration test
docker run \
  --network=esnet-platinum \
  --env "TEST_ES_SERVER=https://elastic:changeme@elasticsearch-platinum:9200" \
  --volume $repo:/usr/src/app \
  --volume /usr/src/app/node_modules \
  --name elasticsearch-js-platinum \
  --name elasticsearch-js \
  --rm \
  elastic/elasticsearch-js \
  npm run test:integration
@@ -1,10 +1,15 @@
---
ELASTICSEARCH_VERSION:
  - 6.7.1
STACK_VERSION:
  - 6.8.4

NODE_JS_VERSION:
  - 14
  - 12
  - 10
  - 8

TEST_SUITE:
  - oss
  - xpack

exclude: ~
16 .github/workflows/backport.yml vendored Normal file
@@ -0,0 +1,16 @@
name: Backport
on:
  pull_request:
    types:
      - closed
      - labeled

jobs:
  backport:
    runs-on: ubuntu-latest
    name: Backport
    steps:
      - name: Backport
        uses: tibdex/backport@v1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
120 .github/workflows/nodejs.yml vendored Normal file
@@ -0,0 +1,120 @@
name: Node CI

on: [push, pull_request]

jobs:
  test:
    name: Test
    runs-on: ${{ matrix.os }}

    strategy:
      matrix:
        node-version: [10.x, 12.x, 14.x]
        os: [ubuntu-latest, windows-latest, macOS-latest]

    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - name: Install
        run: |
          npm install

      - name: Lint
        run: |
          npm run lint

      - name: Unit test
        run: |
          npm run test:unit

      - name: Behavior test
        run: |
          npm run test:behavior

      - name: Type Definitions
        run: |
          npm run test:types

  test-node-v8:
    name: Test
    runs-on: ${{ matrix.os }}

    strategy:
      matrix:
        node-version: [8.x]
        os: [ubuntu-latest, windows-latest, macOS-latest]

    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - name: Install
        run: |
          npm install

      - name: Test
        run: |
          npm run test:unit -- --node-arg=--harmony-async-iteration

  code-coverage:
    name: Code coverage
    runs-on: ubuntu-latest

    strategy:
      matrix:
        node-version: [12.x]

    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - name: Install
        run: |
          npm install

      - name: Code coverage
        run: |
          npm run test:coverage

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v1
        with:
          file: ./coverage.lcov
          fail_ci_if_error: true

  license:
    name: License check
    runs-on: ubuntu-latest

    strategy:
      matrix:
        node-version: [12.x]

    steps:
      - uses: actions/checkout@v2

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v1
        with:
          node-version: ${{ matrix.node-version }}

      - name: Install
        run: |
          npm install

      - name: License checker
        run: |
          npm run license-checker
6 .gitignore vendored
@@ -50,8 +50,6 @@ package-lock.json
# elasticsearch repo or binary files
elasticsearch*

# Generated typings, we don't commit them
# because we should copy them in the main .d.ts file
api/generated.d.ts

test/benchmarks/macro/fixtures/*

*-junit.xml
21 .travis.yml
@@ -1,21 +0,0 @@
dist: trusty

sudo: required

language: node_js

node_js:
  - "12"
  - "10"
  - "8"

install:
  - npm install

script:
  - npm run license-checker && npm run test

notifications:
  email:
    on_success: never
    on_failure: always
2 LICENSE
@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]
Copyright 2020 Elastic and contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
274 README.md
@@ -1,23 +1,24 @@
<img align="right" width="auto" height="auto" src="https://www.elastic.co/static-res/images/elastic-logo-200.png">

# @elastic/elasticsearch
# Elasticsearch Node.js client

[](http://standardjs.com/) [](https://clients-ci.elastic.co/job/elastic+elasticsearch-js+master/) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)

---

**Note:** In the past months we have worked on the new Elasticsearch Node.js client, and if you want you can already try it by following the instructions below, while if you're going to use the legacy one or report an issue, please check out [elastic/elasticsearch-js-legacy](https://github.com/elastic/elasticsearch-js-legacy).

---
[](http://standardjs.com/) [](https://clients-ci.elastic.co/view/Javascript/job/elastic+elasticsearch-js+master/) [](https://codecov.io/gh/elastic/elasticsearch-js) [](https://www.npmjs.com/package/@elastic/elasticsearch)

The official Node.js client for Elasticsearch.

---

**Note:** In the past months we have worked on the new Elasticsearch Node.js client and you can use it by following the instructions below. If you're going to use the legacy one or report an issue, however, please check out [elastic/elasticsearch-js-legacy](https://github.com/elastic/elasticsearch-js-legacy).

---

## Features
- One-to-one mapping with REST API.
- Generalized, pluggable architecture.
- Configurable, automatic discovery of cluster nodes.
- Persistent, Keep-Alive connections.
- Load balancing (with pluggable selection strategy) across all available nodes.
- Load balancing across all available nodes.
- Child client support.
- TypeScript support out of the box.

## Install
@@ -29,19 +30,140 @@ npm install @elastic/elasticsearch

The minimum supported version of Node.js is `v8`.

The library is compatible with all Elasticsearch versions since 5.x, but you should use the same major version of the Elasticsearch instance that you are using.
The library is compatible with all Elasticsearch versions since 5.x, and you should use the same major version of the Elasticsearch instance that you are using.

| Elasticsearch Version | Client Version |
| --------------------- |----------------|
| `master`              | `master`       |
| `7.x`                 | `7.x`          |
| `6.x`                 | `6.x`          |
| `5.x`                 | `5.x`          |

To install a specific major of the client, run the following command:
```
# Elasticsearch 7.x
@elastic/elasticsearch@7

# Elasticsearch 6.x
@elastic/elasticsearch@6

# Elasticsearch 5.x
@elastic/elasticsearch@5
npm install @elastic/elasticsearch@<major>
```

#### Install multiple versions
#### Browser

WARNING: There is no official support for the browser environment. It exposes your Elasticsearch instance to everyone, which could lead to security issues.
We recommend that you write a lightweight proxy that uses this client instead.

## Documentation

- [Introduction](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/introduction.html)
- [Usage](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-usage.html)
- [Client configuration](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/client-configuration.html)
- [API reference](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html)
- [Breaking changes coming from the old client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/breaking-changes.html)
- [Authentication](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html)
- [Observability](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html)
- [Creating a child client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/child-client.html)
- [Extend the client](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/extend-client.html)
- [Typescript support](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/typescript.html)
- [Examples](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/examples.html)

## Quick start

First of all, require the client and initialize it:
```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })
```

You can use both the callback-style API and the promise-style API; both behave the same way.
```js
// promise API
const result = await client.search({
  index: 'my-index',
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
})

// callback API
client.search({
  index: 'my-index',
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, (err, result) => {
  if (err) console.log(err)
})
```
The returned value of **every** API call is formed as follows:
```ts
{
  body: object | boolean
  statusCode: number
  headers: object
  warnings: [string]
  meta: object
}
```

Let's see a complete example!
```js
'use strict'

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Let's start by indexing some data
  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Ned Stark',
      quote: 'Winter is coming.'
    }
  })

  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Daenerys Targaryen',
      quote: 'I am the blood of the dragon.'
    }
  })

  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Tyrion Lannister',
      quote: 'A mind needs books like a sword needs a whetstone.'
    }
  })

  // here we are forcing an index refresh, otherwise we will not
  // get any result in the subsequent search
  await client.indices.refresh({ index: 'game-of-thrones' })

  // Let's search!
  const { body } = await client.search({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      query: {
        match: { quote: 'winter' }
      }
    }
  })

  console.log(body.hits.hits)
}

run().catch(console.log)
```

## Install multiple versions
If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client as well. In the past, installing multiple versions of the same package was not possible, but with `npm v6.9` you can do that via aliasing.

The command you must run to install different versions of the client is:
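A plausible form of that command, inferred from the aliases in the `package.json` excerpt below (the exact version ranges are assumptions):

```sh
npm install es6@npm:@elastic/elasticsearch@^6.0.0
npm install es7@npm:@elastic/elasticsearch@^7.0.0
```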
@@ -60,7 +182,7 @@ And your `package.json` will look like the following:
  "es7": "npm:@elastic/elasticsearch@^7.0.0"
}
```
And finally, you will require the packages from your code by using the alias you have defined.
You will require the packages from your code by using the alias you have defined.
```js
const { Client: Client6 } = require('es6')
const { Client: Client7 } = require('es7')
```
@@ -77,118 +199,6 @@ Finally, if you want to install the client for the next version of Elasticsearch
npm install esmaster@github:elastic/elasticsearch-js
```

## Usage

You can find the full documentation in our [docs](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html) website.

```js
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

// promise API
const result = await client.search({
  index: 'my-index',
  body: { foo: 'bar' }
})

// callback API
client.search({
  index: 'my-index',
  body: { foo: 'bar' }
}, (err, result) => {
  if (err) console.log(err)
})
```
The returned value of **every** API call is formed as follows:
```ts
{
  body: object | boolean
  statusCode: number
  headers: object
  warnings: [string]
  meta: object
}
```
### Client options

The client is designed to be easily configured as you see fit for your needs; below you can see all the possible options that you can use to configure it.

```ts
{
  // the Elasticsearch endpoint to use
  node: string | string[];
  // alias of above
  nodes: string | string[];
  // custom connection class
  Connection: typeof Connection;
  // custom connection pool class
  ConnectionPool: typeof ConnectionPool;
  // custom transport class
  Transport: typeof Transport;
  // custom serializer class
  Serializer: typeof Serializer;
  // max number of retries for each request
  maxRetries: number;
  // max request timeout for each request
  requestTimeout: number;
  // max ping timeout for each request
  pingTimeout: number;
  // perform a sniff operation every `n` milliseconds
  sniffInterval: number;
  // perform a sniff once the client is started
  sniffOnStart: boolean;
  // custom sniff endpoint, defaults `_nodes/_all/http`
  sniffEndpoint: string;
  // perform a sniff on connection fault
  sniffOnConnectionFault: boolean;
  // configure the node resurrection strategy, default `ping`
  resurrectStrategy: 'ping' | 'optimistic' | 'none';
  // adds `accept-encoding` header to every request
  suggestCompression: boolean;
  // enable gzip request body compression
  compression: 'gzip';
  // ssl configuration
  ssl: http.SecureContextOptions;
  // http agent options
  agent: http.AgentOptions;
  // filters which node not to use for a request
  nodeFilter: nodeFilterFn;
  // custom selection strategy, defaults `round-robin`
  nodeSelector: nodeSelectorFn | string;
  // function to generate the request id for every request
  generateRequestId: generateRequestIdFn;
  // name to identify the client instance in the events
  name: string;
}
```

### Request specific options
If needed you can pass request specific options in a second object:
```js
// promise API
const result = await client.search({
  index: 'my-index',
  body: { foo: 'bar' }
}, {
  ignore: [404],
  maxRetries: 3
})
```
The supported *request specific options* are:
```ts
{
  ignore: [number], // default `null`
  requestTimeout: number, // client default
  maxRetries: number, // default `5`
  asStream: boolean, // default `false`
  compression: string, // default `false`
  headers: object, // default `null`
  querystring: object, // default `null`
  context: object, // default `null`
  id: any // default incr. integer
}
```

## License

This software is licensed under the [Apache 2 license](./LICENSE).
@@ -58,8 +58,6 @@ function buildGet (opts) {
    '_source_exclude',
    '_source_includes',
    '_source_include',
    '_source_exclude',
    '_source_include',
    'version',
    'version_type',
    'pretty',
@@ -3,7 +3,7 @@ comment: off
coverage:
  precision: 2
  round: down
  range: "90...100"
  range: "95...100"

  status:
    project: yes
@ -3,53 +3,11 @@
|
||||
|
||||
This document contains code snippets to show you how to connect to various Elasticsearch providers.
|
||||
|
||||
=== Basic Auth
|
||||
|
||||
You can provide your credentials in the node(s) URL.
|
||||
|
||||
[source,js]
|
||||
----
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
node: 'https://username:password@localhost:9200'
|
||||
})
|
||||
----
|
||||
|
||||
Or you can use the full node declaration.
|
||||
|
||||
[source,js]
|
||||
----
|
||||
const { URL } = require('url')
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
node: {
|
||||
url: new URL('https://username:password@localhost:9200'),
|
||||
id: 'node-1',
|
||||
...
|
||||
}
|
||||
})
|
||||
----
|
||||
|
||||
=== SSL configuration
|
||||
|
||||
Without any additional configuration you can specify `https://` node urls, but the certificates used to sign these requests will not verified (`rejectUnauthorized: false`). To turn on certificate verification you must specify an `ssl` object either in the top level config or in each host config object and set `rejectUnauthorized: true`. The ssl config object can contain many of the same configuration options that https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[tls.connect()] accepts.
|
||||
|
||||
[source,js]
|
||||
----
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
node: 'http://username:password@localhost:9200',
|
||||
ssl: {
|
||||
ca: fs.readFileSync('./cacert.pem'),
|
||||
rejectUnauthorized: true
|
||||
}
|
||||
})
|
||||
----
|
||||
|
||||
=== Elastic Cloud
|
||||
|
||||
If you are using https://www.elastic.co/cloud[Elastic Cloud], the client offers a easy way to connect to it via the `cloud` option. +
|
||||
You must pass the Cloud ID that you can find in the cloud console, then your username and password.
|
||||
You must pass the Cloud ID that you can find in the cloud console, then your username and password inside the `auth` option.
|
||||
|
||||
NOTE: When connecting to Elastic Cloud, the client will automatically enable both request and response compression by default, since it yields significant throughput improvements. +
|
||||
Moreover, the client will also set the ssl option `secureProtocol` to `TLSv1_2_method` unless specified otherwise.
|
||||
@ -63,8 +21,91 @@ const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
cloud: {
|
||||
id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA==',
|
||||
},
|
||||
auth: {
|
||||
username: 'elastic',
|
||||
password: 'changeme'
|
||||
}
|
||||
})
|
||||
----
|
||||
----
|
||||
|
||||
=== Basic authentication
|
||||
|
||||
You can provide your credentials by passing the `username` and `password` parameters via the `auth` option.
|
||||
|
||||
NOTE: If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence.
|
||||
|
||||
[source,js]
|
||||
----
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
node: 'https://localhost:9200',
|
||||
auth: {
|
||||
username: 'elastic',
|
||||
password: 'changeme'
|
||||
}
|
||||
})
|
||||
----
|
||||
|
||||
Otherwise, you can provide your credentials in the node(s) URL.
|
||||
|
||||
[source,js]
|
||||
----
|
||||
const { Client } = require('@elastic/elasticsearch')
|
||||
const client = new Client({
|
||||
node: 'https://username:password@localhost:9200'
|
||||
})
|
||||
----

=== ApiKey authentication

You can use the https://www.elastic.co/guide/en/elasticsearch/reference/7.16/security-api-create-api-key.html[ApiKey] authentication by passing the `apiKey` parameter via the `auth` option. +
The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the {ref-7x}/security-api-create-api-key.html[create api key endpoint].

NOTE: If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  node: 'https://localhost:9200',
  auth: {
    apiKey: 'base64EncodedKey'
  }
})
----

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  node: 'https://localhost:9200',
  auth: {
    apiKey: {
      id: 'foo',
      api_key: 'bar'
    }
  }
})
----
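
Under the hood (see the `prepareHeaders` addition to `lib/Connection.js` later in this diff), both forms end up as an `Authorization: ApiKey <base64>` header; the object form is encoded as `base64(id:api_key)`. A small sketch of the equivalence:

[source,js]
----
// string form: used as-is -> 'ApiKey base64EncodedKey'
// object form: id and api_key are joined with ':' and base64 encoded
const header = 'ApiKey ' + Buffer.from('foo:bar').toString('base64')
console.log(header) // ApiKey Zm9vOmJhcg==
----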

=== SSL configuration

Without any additional configuration you can specify `https://` node urls, but the certificates used to sign these requests will not be verified (`rejectUnauthorized: false`). To turn on certificate verification, you must specify an `ssl` object either in the top level config or in each host config object and set `rejectUnauthorized: true`. The ssl config object can contain many of the same configuration options that https://nodejs.org/api/tls.html#tls_tls_connect_options_callback[tls.connect()] accepts.

[source,js]
----
const fs = require('fs')
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  node: 'http://localhost:9200',
  auth: {
    username: 'elastic',
    password: 'changeme'
  },
  ssl: {
    ca: fs.readFileSync('./cacert.pem'),
    rejectUnauthorized: true
  }
})
----

@@ -43,6 +43,28 @@ node: {
}
----

|`auth`
a|Your authentication data. You can use both Basic authentication and https://www.elastic.co/guide/en/elasticsearch/reference/7.16/security-api-create-api-key.html[ApiKey]. +
See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] for more details. +
_Default:_ `null`

Basic authentication:
[source,js]
----
auth: {
  username: 'elastic',
  password: 'changeme'
}
----
https://www.elastic.co/guide/en/elasticsearch/reference/7.16/security-api-create-api-key.html[ApiKey] authentication:
[source,js]
----
auth: {
  apiKey: 'base64EncodedKey'
}
----

|`maxRetries`
|`number` - Max number of retries for each request. +
_Default:_ `3`

@@ -64,7 +86,7 @@ _Default:_ `false`
_Default:_ `false`

|`sniffEndpoint`
|`string` - Max request timeout for each request. +
|`string` - Endpoint to ping during a sniff. +
_Default:_ `'_nodes/_all/http'`

|`sniffOnConnectionFault`
@@ -150,6 +172,33 @@ function generateRequestId (params, options) {
|`name`
|`string` - The name to identify the client instance in the events. +
_Default:_ `elasticsearch-js`

|`opaqueIdPrefix`
|`string` - A string that will be used to prefix any `X-Opaque-Id` header. +
See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/observability.html#_x-opaque-id_support[`X-Opaque-Id` support] for more details. +
_Default:_ `null`

|`headers`
|`object` - A set of custom headers to send in every request. +
_Default:_ `{}`

|`cloud`
a|`object` - Custom configuration for connecting to https://cloud.elastic.co[Elastic Cloud]. See https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Authentication] for more details. +
_Default:_ `null` +
_Cloud configuration example:_
[source,js]
----
const client = new Client({
  cloud: {
    id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA=='
  },
  auth: {
    username: 'elastic',
    password: 'changeme'
  }
})
----

|===

=== Advanced configuration

@@ -184,7 +233,7 @@ Sometimes you just need to inject a little snippet of your code and then continu
class MyTransport extends Transport {
  request (params, options, callback) {
    // your code
    super.request(params, options, callback)
    return super.request(params, options, callback)
  }
}
----
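
To wire the custom transport in, pass the class via the client's `Transport` option (one of the constructor defaults visible in the `index.js` hunk later in this diff). A minimal sketch, assuming `Transport` is exported from the package root:

[source,js]
----
const { Client, Transport } = require('@elastic/elasticsearch')

class MyTransport extends Transport {
  request (params, options, callback) {
    // your code
    return super.request(params, options, callback)
  }
}

const client = new Client({
  node: 'http://localhost:9200',
  Transport: MyTransport
})
----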

@@ -8,57 +8,83 @@ This can greatly increase the indexing speed.
----
'use strict'

require('array.prototype.flatmap').shim()
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })
const client = new Client({
  node: 'http://localhost:9200'
})

async function run () {
  const { body: bulkResponse } = await client.bulk({
    // here we are forcing an index refresh,
    // otherwise we will not get any result
    // in the consequent search
    refresh: true,
    body: [
      // operation to perform
      { index: { _index: 'game-of-thrones' } },
      // the document to index
      {
        character: 'Ned Stark',
        quote: 'Winter is coming.'
      },

      { index: { _index: 'game-of-thrones' } },
      {
        character: 'Daenerys Targaryen',
        quote: 'I am the blood of the dragon.'
      },

      { index: { _index: 'game-of-thrones' } },
      {
        character: 'Tyrion Lannister',
        quote: 'A mind needs books like a sword needs a whetstone.'
      }
    ]
  })

  if (bulkResponse.errors) {
    console.log(bulkResponse)
    process.exit(1)
  }

  // Let's search!
  const { body } = await client.search({
    index: 'game-of-thrones',
  await client.indices.create({
    index: 'tweets',
    body: {
      query: {
        match: {
          quote: 'winter'
      mappings: {
        properties: {
          id: { type: 'integer' },
          text: { type: 'text' },
          user: { type: 'keyword' },
          time: { type: 'date' }
        }
      }
    }
  })
  }, { ignore: [400] })

  console.log(body.hits.hits)
  const dataset = [{
    id: 1,
    text: 'If I fall, don\'t bring me back.',
    user: 'jon',
    time: new Date()
  }, {
    id: 2,
    text: 'Winter is coming',
    user: 'ned',
    time: new Date()
  }, {
    id: 3,
    text: 'A Lannister always pays his debts.',
    user: 'tyrion',
    time: new Date()
  }, {
    id: 4,
    text: 'I am the blood of the dragon.',
    user: 'daenerys',
    time: new Date()
  }, {
    id: 5, // change this value to a string to see the bulk response with errors
    text: 'A girl is Arya Stark of Winterfell. And I\'m going home.',
    user: 'arya',
    time: new Date()
  }]

  const body = dataset.flatMap(doc => [{ index: { _index: 'tweets' } }, doc])

  const { body: bulkResponse } = await client.bulk({ refresh: true, body })

  if (bulkResponse.errors) {
    const erroredDocuments = []
    // The items array has the same order as the dataset we just indexed.
    // The presence of the `error` key indicates that the operation
    // that we did for the document has failed.
    bulkResponse.items.forEach((action, i) => {
      const operation = Object.keys(action)[0]
      if (action[operation].error) {
        erroredDocuments.push({
          // If the status is 429 it means that you can retry the document,
          // otherwise it's very likely a mapping error, and you should
          // fix the document before trying it again.
          status: action[operation].status,
          error: action[operation].error,
          operation: body[i * 2],
          document: body[i * 2 + 1]
        })
      }
    })
    console.log(erroredDocuments)
  }

  const { body: count } = await client.count({ index: 'tweets' })
  console.log(count)
}

run().catch(console.log)
----
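
As the comments note, a `429` status means the document was only rate-limited and can be retried, while other statuses usually indicate a document that must be fixed first. A hedged sketch of what such a retry could look like, reusing the `erroredDocuments` array built above (the helper name and the fixed backoff are illustrative):

[source,js]
----
// Retry only the rate-limited (429) documents with a simple fixed backoff.
async function retryRateLimited (client, erroredDocuments) {
  const retriable = erroredDocuments.filter(doc => doc.status === 429)
  if (retriable.length === 0) return
  await new Promise(resolve => setTimeout(resolve, 1000)) // crude backoff
  const body = retriable.flatMap(doc => [doc.operation, doc.document])
  await client.bulk({ refresh: true, body })
}
----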

@@ -59,7 +59,7 @@ async function run (): void {
  client
    .search(params)
    .then((result: ApiResponse) => {
      console.og(result.body.hits.hits)
      console.log(result.body.hits.hits)
    })
    .catch((err: Error) => {
      console.log(err)

@@ -1,4 +1,4 @@
= @elastic/elasticsearch
= Elasticsearch Node.js client

:branch: 6.7
include::{asciidoc-dir}/../../shared/attributes.asciidoc[]

@@ -8,7 +8,8 @@ The official Node.js client for Elasticsearch.
* Generalized, pluggable architecture.
* Configurable, automatic discovery of cluster nodes.
* Persistent, Keep-Alive connections.
* Load balancing (with pluggable selection strategy) across all available nodes.
* Load balancing across all available nodes.
* Child client support.
* TypeScript support out of the box.

=== Install
@@ -18,25 +19,157 @@ npm install @elastic/elasticsearch
----

=== Compatibility

The minimum supported version of Node.js is `v8`.

The library is compatible with all Elasticsearch versions since 5.x, but you should use the same major version of the Elasticsearch instance that you are using.
The library is compatible with all {es} versions since 5.x. We recommend you to
use the same major version of the client as the {es} instance that you are
using.

[%header,cols=2*]
|===
|Elasticsearch Version
|Client Version

|`master`
|`master`

|`7.x`
|`7.x`

|`6.x`
|`6.x`

|`5.x`
|`5.x`
|===

To install a specific major of the client, run the following command:
----
npm install @elastic/elasticsearch@<major>
----
# Elasticsearch 7.x
@elastic/elasticsearch@7

# Elasticsearch 6.x
@elastic/elasticsearch@6

# Elasticsearch 5.x
@elastic/elasticsearch@5

==== Browser

WARNING: There is no official support for the browser environment. It exposes
your {es} instance to everyone, which could lead to security issues. We
recommend you to write a lightweight proxy that uses this client instead.

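A minimal sketch of such a proxy, using only Node's built-in `http` module; the index name, route, and query handling are illustrative, not a production design:

[source,js]
----
const http = require('http')
const { URL } = require('url')
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

// Expose a single, constrained search endpoint instead of the whole cluster.
http.createServer(async (req, res) => {
  const { searchParams } = new URL(req.url, 'http://localhost')
  try {
    const { body } = await client.search({
      index: 'my-index',
      body: { query: { match: { hello: searchParams.get('q') || '' } } }
    })
    res.setHeader('content-type', 'application/json')
    res.end(JSON.stringify(body.hits.hits))
  } catch (err) {
    res.statusCode = 500
    res.end(err.message)
  }
}).listen(3000)
----
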
=== Quick start

First of all, require the client and initialize it:
[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })
----

You can use both the callback-style API and the promise-style API; both behave the same way.
[source,js]
----
// promise API
const result = await client.search({
  index: 'my-index',
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
})

// callback API
client.search({
  index: 'my-index',
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, (err, result) => {
  if (err) console.log(err)
})
----
The returned value of **every** API call is formed as follows:
[source,ts]
----
{
  body: object | boolean
  statusCode: number
  headers: object
  warnings: [string]
  meta: object
}
----
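
In practice you usually destructure only the fields you need; for example, using the search call from above:

[source,js]
----
const { body, statusCode, warnings } = await client.search({
  index: 'my-index',
  body: { query: { match: { hello: 'world' } } }
})
console.log(statusCode, warnings, body.hits.hits)
----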

Let's see a complete example!
[source,js]
----
'use strict'

const { Client } = require('@elastic/elasticsearch')
const client = new Client({ node: 'http://localhost:9200' })

async function run () {
  // Let's start by indexing some data
  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Ned Stark',
      quote: 'Winter is coming.'
    }
  })

  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Daenerys Targaryen',
      quote: 'I am the blood of the dragon.'
    }
  })

  await client.index({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      character: 'Tyrion Lannister',
      quote: 'A mind needs books like a sword needs a whetstone.'
    }
  })

  // here we are forcing an index refresh, otherwise we will not
  // get any result in the consequent search
  await client.indices.refresh({ index: 'game-of-thrones' })

  // Let's search!
  const { body } = await client.search({
    index: 'game-of-thrones',
    // type: '_doc', // uncomment this line if you are using Elasticsearch ≤ 6
    body: {
      query: {
        match: { quote: 'winter' }
      }
    }
  })

  console.log(body.hits.hits)
}

run().catch(console.log)
----

==== Install multiple versions
If you are using multiple versions of Elasticsearch, you need to use multiple versions of the client. +
In the past, install multiple versions of the same package was not possible, but with `npm v6.9`, you can do that via aliasing.

The command you must run to install different version of the client is:
If you are using multiple versions of {es}, you need to use multiple versions of
the client as well. In the past, installing multiple versions of the same
package was not possible, but with `npm v6.9`, you can do it via aliasing.

To install different version of the client, run the following command:

[source,sh]
----
@@ -59,7 +192,7 @@ And your `package.json` will look like the following:
}
----

And finally, you will require the packages from your code by using the alias you have defined.
You will require the packages from your code by using the alias you have defined.

[source,js]
----
@@ -73,9 +206,13 @@ client6.info(console.log)
client7.info(console.log)
----

Finally, if you want to install the client for the next version of Elasticsearch (the one that lives in Elasticsearch's master branch), you can use the following command:

Finally, if you want to install the client for the next version of {es} (the one
that lives in the {es} master branch), use the following command:

[source,sh]
----
npm install esmaster@github:elastic/elasticsearch-js
----
WARNING: This command will install the master branch of the client, which is not considered stable.
WARNING: This command installs the master branch of the client which is not
considered stable.

@@ -248,3 +248,46 @@ child.search({
  if (err) console.log(err)
})
----

=== X-Opaque-Id support
To improve the overall observability, the client offers an easy way to configure the `X-Opaque-Id` header. If you set the `X-Opaque-Id` in a specific request, this will allow you to discover this identifier in the https://www.elastic.co/guide/en/elasticsearch/reference/6.8/logging.html#deprecation-logging[deprecation logs], help you with https://www.elastic.co/guide/en/elasticsearch/reference/6.8/index-modules-slowlog.html[identifying search slow log origin] as well as https://www.elastic.co/guide/en/elasticsearch/reference/6.8/tasks.html#_identifying_running_tasks[identifying running tasks].

The `X-Opaque-Id` should be configured in each request; to do that, you can use the `opaqueId` option, as you can see in the following example. +
The resulting header will be `{ 'X-Opaque-Id': 'my-search' }`.

[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  node: 'http://localhost:9200'
})

client.search({
  index: 'my-index',
  body: { foo: 'bar' }
}, {
  opaqueId: 'my-search'
}, (err, result) => {
  if (err) console.log(err)
})
----

Sometimes it may be useful to prefix all the `X-Opaque-Id` headers with a specific string, in case you need to identify a specific client or server. To do this, the client offers a top-level configuration option: `opaqueIdPrefix`. +
In the following example, the resulting header will be `{ 'X-Opaque-Id': 'proxy-client::my-search' }`.
[source,js]
----
const { Client } = require('@elastic/elasticsearch')
const client = new Client({
  node: 'http://localhost:9200',
  opaqueIdPrefix: 'proxy-client::'
})

client.search({
  index: 'my-index',
  body: { foo: 'bar' }
}, {
  opaqueId: 'my-search'
}, (err, result) => {
  if (err) console.log(err)
})
----

5 docs/page_header.html Normal file
@@ -0,0 +1,5 @@
<p>
  <strong>NOTE</strong>: You are looking at documentation for an older release.
  For the latest information, see the
  <a href="../current/index.html">current release documentation</a>.
</p>
File diff suppressed because it is too large
@@ -52,7 +52,7 @@ interface SearchResponse<T> {
  }
}

// Define the intefrace of the source object
// Define the interface of the source object
interface Source {
  foo: string
}

@@ -123,7 +123,7 @@ interface SearchResponse<T> {
  aggregations?: any;
}

// Define the intefrace of the source object
// Define the interface of the source object
interface Source {
  foo: string
}

@@ -11,13 +11,21 @@ const client = new Client({ node: 'http://localhost:9200' })
// promise API
const result = await client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
})

// callback API
client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, (err, result) => {
  if (err) console.log(err)
})

@@ -47,13 +55,21 @@ The `meta` key contains all the information regarding the request, such as attem
// promise API
const { body } = await client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
})

// callback API
client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, (err, { body }) => {
  if (err) console.log(err)
})

@@ -68,7 +84,11 @@ When using the callback style API, the function will also return an object that
// callback API
const request = client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, {
  ignore: [404],
  maxRetries: 3

@@ -98,7 +118,11 @@ function abortableRequest (params, options) {

const request = abortableRequest({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, {
  ignore: [404],
  maxRetries: 3

@@ -115,16 +139,24 @@ If needed you can pass request specific options in a second object:
// promise API
const result = await client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, {
  ignore: [404],
  maxRetries: 3
})

// calback API
// callback API
client.search({
  index: 'my-index',
  body: { foo: 'bar' }
  body: {
    query: {
      match: { hello: 'world' }
    }
  }
}, {
  ignore: [404],
  maxRetries: 3
@@ -187,7 +219,7 @@ console.log(errors)
Below you can find the errors exported by the client.
[cols=2*]
|===
|`ElasticsearchClientErrors`
|`ElasticsearchClientError`
|Every error inherits from this class; it is the basic error generated by the client.

|`TimeoutError`

@@ -197,7 +229,7 @@ Following you can find the errors exported by the client.
|Generated when an error occurs during the request; it can be a connection error or a malformed stream of data.

|`NoLivingConnectionsError`
|Generated in case of all connections present in the connection pool are dead.
|Given the configuration, the ConnectionPool was not able to find a usable Connection for this request.

|`SerializationError`
|Generated if the serialization fails.

411 index.d.ts vendored
@@ -20,7 +20,7 @@
/// <reference types="node" />

import { EventEmitter } from 'events';
import { SecureContextOptions } from 'tls';
import { ConnectionOptions as TlsConnectionOptions } from 'tls';
import Transport, {
  ApiResponse,
  RequestEvent,

@@ -31,8 +31,9 @@ import Transport, {
  generateRequestIdFn,
  TransportRequestCallback
} from './lib/Transport';
import { URL } from 'url';
import Connection, { AgentOptions, agentFn } from './lib/Connection';
import ConnectionPool, { ResurrectEvent } from './lib/ConnectionPool';
import { ConnectionPool, ResurrectEvent, BasicAuth, ApiKeyAuth } from './lib/pool';
import Serializer from './lib/Serializer';
import * as RequestParams from './api/requestParams';
import * as errors from './lib/errors';

@@ -72,8 +73,22 @@ interface ClientExtends {
}
// /Extend API

interface NodeOptions {
  url: URL;
  id?: string;
  agent?: AgentOptions;
  ssl?: TlsConnectionOptions;
  headers?: anyObject;
  roles?: {
    master: boolean;
    data: boolean;
    ingest: boolean;
    ml: boolean;
  }
}

interface ClientOptions {
  node?: string | string[];
  node?: string | string[] | NodeOptions | NodeOptions[];
  nodes?: string | string[];
  Connection?: typeof Connection;
  ConnectionPool?: typeof ConnectionPool;

@@ -82,24 +97,27 @@ interface ClientOptions {
  maxRetries?: number;
  requestTimeout?: number;
  pingTimeout?: number;
  sniffInterval?: number;
  sniffInterval?: number | boolean;
  sniffOnStart?: boolean;
  sniffEndpoint?: string;
  sniffOnConnectionFault?: boolean;
  resurrectStrategy?: 'ping' | 'optimistic' | 'none';
  suggestCompression?: boolean;
  compression?: 'gzip';
  ssl?: SecureContextOptions;
  ssl?: TlsConnectionOptions;
  agent?: AgentOptions | agentFn;
  nodeFilter?: nodeFilterFn;
  nodeSelector?: nodeSelectorFn | string;
  headers?: anyObject;
  opaqueIdPrefix?: string;
  generateRequestId?: generateRequestIdFn;
  name?: string;
  auth?: BasicAuth | ApiKeyAuth;
  cloud?: {
    id: string;
    username: string;
    password: string;
    // TODO: remove username and password here in 8
    username?: string;
    password?: string;
  }
}

@@ -111,6 +129,7 @@ declare class Client extends EventEmitter {
  extend: ClientExtends;
  child(opts?: ClientOptions): Client;
  close(callback?: Function): Promise<void> | void;
  /* GENERATED */
  bulk: ApiMethod<RequestParams.Bulk>
  cat: {
    aliases: ApiMethod<RequestParams.CatAliases>

@@ -356,216 +375,217 @@ declare class Client extends EventEmitter {
  updateByQueryRethrottle: ApiMethod<RequestParams.UpdateByQueryRethrottle>
  xpack: {
    graph: {
      explore: ApiMethod<RequestParams.XpackGraphExplore>
    }
    info: ApiMethod<RequestParams.XpackInfo>
    license: {
      delete: ApiMethod<RequestParams.XpackLicenseDelete>
      get: ApiMethod<RequestParams.XpackLicenseGet>
      get_basic_status: ApiMethod<RequestParams.XpackLicenseGetBasicStatus>
      getBasicStatus: ApiMethod<RequestParams.XpackLicenseGetBasicStatus>
      get_trial_status: ApiMethod<RequestParams.XpackLicenseGetTrialStatus>
      getTrialStatus: ApiMethod<RequestParams.XpackLicenseGetTrialStatus>
      post: ApiMethod<RequestParams.XpackLicensePost>
      post_start_basic: ApiMethod<RequestParams.XpackLicensePostStartBasic>
      postStartBasic: ApiMethod<RequestParams.XpackLicensePostStartBasic>
      post_start_trial: ApiMethod<RequestParams.XpackLicensePostStartTrial>
      postStartTrial: ApiMethod<RequestParams.XpackLicensePostStartTrial>
    }
    migration: {
      deprecations: ApiMethod<RequestParams.XpackMigrationDeprecations>
      get_assistance: ApiMethod<RequestParams.XpackMigrationGetAssistance>
      getAssistance: ApiMethod<RequestParams.XpackMigrationGetAssistance>
      upgrade: ApiMethod<RequestParams.XpackMigrationUpgrade>
    }
    ml: {
      close_job: ApiMethod<RequestParams.XpackMlCloseJob>
      closeJob: ApiMethod<RequestParams.XpackMlCloseJob>
      delete_calendar: ApiMethod<RequestParams.XpackMlDeleteCalendar>
      deleteCalendar: ApiMethod<RequestParams.XpackMlDeleteCalendar>
      delete_calendar_event: ApiMethod<RequestParams.XpackMlDeleteCalendarEvent>
      deleteCalendarEvent: ApiMethod<RequestParams.XpackMlDeleteCalendarEvent>
      delete_calendar_job: ApiMethod<RequestParams.XpackMlDeleteCalendarJob>
      deleteCalendarJob: ApiMethod<RequestParams.XpackMlDeleteCalendarJob>
      delete_datafeed: ApiMethod<RequestParams.XpackMlDeleteDatafeed>
      deleteDatafeed: ApiMethod<RequestParams.XpackMlDeleteDatafeed>
      delete_expired_data: ApiMethod<RequestParams.XpackMlDeleteExpiredData>
      deleteExpiredData: ApiMethod<RequestParams.XpackMlDeleteExpiredData>
      delete_filter: ApiMethod<RequestParams.XpackMlDeleteFilter>
      deleteFilter: ApiMethod<RequestParams.XpackMlDeleteFilter>
      delete_forecast: ApiMethod<RequestParams.XpackMlDeleteForecast>
      deleteForecast: ApiMethod<RequestParams.XpackMlDeleteForecast>
      delete_job: ApiMethod<RequestParams.XpackMlDeleteJob>
      deleteJob: ApiMethod<RequestParams.XpackMlDeleteJob>
      delete_model_snapshot: ApiMethod<RequestParams.XpackMlDeleteModelSnapshot>
      deleteModelSnapshot: ApiMethod<RequestParams.XpackMlDeleteModelSnapshot>
      find_file_structure: ApiMethod<RequestParams.XpackMlFindFileStructure>
      findFileStructure: ApiMethod<RequestParams.XpackMlFindFileStructure>
      flush_job: ApiMethod<RequestParams.XpackMlFlushJob>
      flushJob: ApiMethod<RequestParams.XpackMlFlushJob>
      forecast: ApiMethod<RequestParams.XpackMlForecast>
      get_buckets: ApiMethod<RequestParams.XpackMlGetBuckets>
      getBuckets: ApiMethod<RequestParams.XpackMlGetBuckets>
      get_calendar_events: ApiMethod<RequestParams.XpackMlGetCalendarEvents>
      getCalendarEvents: ApiMethod<RequestParams.XpackMlGetCalendarEvents>
      get_calendars: ApiMethod<RequestParams.XpackMlGetCalendars>
      getCalendars: ApiMethod<RequestParams.XpackMlGetCalendars>
      get_categories: ApiMethod<RequestParams.XpackMlGetCategories>
      getCategories: ApiMethod<RequestParams.XpackMlGetCategories>
      get_datafeed_stats: ApiMethod<RequestParams.XpackMlGetDatafeedStats>
      getDatafeedStats: ApiMethod<RequestParams.XpackMlGetDatafeedStats>
      get_datafeeds: ApiMethod<RequestParams.XpackMlGetDatafeeds>
      getDatafeeds: ApiMethod<RequestParams.XpackMlGetDatafeeds>
      get_filters: ApiMethod<RequestParams.XpackMlGetFilters>
      getFilters: ApiMethod<RequestParams.XpackMlGetFilters>
      get_influencers: ApiMethod<RequestParams.XpackMlGetInfluencers>
      getInfluencers: ApiMethod<RequestParams.XpackMlGetInfluencers>
      get_job_stats: ApiMethod<RequestParams.XpackMlGetJobStats>
      getJobStats: ApiMethod<RequestParams.XpackMlGetJobStats>
      get_jobs: ApiMethod<RequestParams.XpackMlGetJobs>
      getJobs: ApiMethod<RequestParams.XpackMlGetJobs>
      get_model_snapshots: ApiMethod<RequestParams.XpackMlGetModelSnapshots>
      getModelSnapshots: ApiMethod<RequestParams.XpackMlGetModelSnapshots>
      get_overall_buckets: ApiMethod<RequestParams.XpackMlGetOverallBuckets>
      getOverallBuckets: ApiMethod<RequestParams.XpackMlGetOverallBuckets>
      get_records: ApiMethod<RequestParams.XpackMlGetRecords>
      getRecords: ApiMethod<RequestParams.XpackMlGetRecords>
      info: ApiMethod<RequestParams.XpackMlInfo>
      open_job: ApiMethod<RequestParams.XpackMlOpenJob>
      openJob: ApiMethod<RequestParams.XpackMlOpenJob>
      post_calendar_events: ApiMethod<RequestParams.XpackMlPostCalendarEvents>
      postCalendarEvents: ApiMethod<RequestParams.XpackMlPostCalendarEvents>
      post_data: ApiMethod<RequestParams.XpackMlPostData>
      postData: ApiMethod<RequestParams.XpackMlPostData>
      preview_datafeed: ApiMethod<RequestParams.XpackMlPreviewDatafeed>
      previewDatafeed: ApiMethod<RequestParams.XpackMlPreviewDatafeed>
      put_calendar: ApiMethod<RequestParams.XpackMlPutCalendar>
      putCalendar: ApiMethod<RequestParams.XpackMlPutCalendar>
      put_calendar_job: ApiMethod<RequestParams.XpackMlPutCalendarJob>
      putCalendarJob: ApiMethod<RequestParams.XpackMlPutCalendarJob>
      put_datafeed: ApiMethod<RequestParams.XpackMlPutDatafeed>
      putDatafeed: ApiMethod<RequestParams.XpackMlPutDatafeed>
      put_filter: ApiMethod<RequestParams.XpackMlPutFilter>
      putFilter: ApiMethod<RequestParams.XpackMlPutFilter>
      put_job: ApiMethod<RequestParams.XpackMlPutJob>
      putJob: ApiMethod<RequestParams.XpackMlPutJob>
      revert_model_snapshot: ApiMethod<RequestParams.XpackMlRevertModelSnapshot>
      revertModelSnapshot: ApiMethod<RequestParams.XpackMlRevertModelSnapshot>
      set_upgrade_mode: ApiMethod<RequestParams.XpackMlSetUpgradeMode>
      setUpgradeMode: ApiMethod<RequestParams.XpackMlSetUpgradeMode>
      start_datafeed: ApiMethod<RequestParams.XpackMlStartDatafeed>
      startDatafeed: ApiMethod<RequestParams.XpackMlStartDatafeed>
      stop_datafeed: ApiMethod<RequestParams.XpackMlStopDatafeed>
      stopDatafeed: ApiMethod<RequestParams.XpackMlStopDatafeed>
      update_datafeed: ApiMethod<RequestParams.XpackMlUpdateDatafeed>
      updateDatafeed: ApiMethod<RequestParams.XpackMlUpdateDatafeed>
      update_filter: ApiMethod<RequestParams.XpackMlUpdateFilter>
      updateFilter: ApiMethod<RequestParams.XpackMlUpdateFilter>
      update_job: ApiMethod<RequestParams.XpackMlUpdateJob>
      updateJob: ApiMethod<RequestParams.XpackMlUpdateJob>
      update_model_snapshot: ApiMethod<RequestParams.XpackMlUpdateModelSnapshot>
      updateModelSnapshot: ApiMethod<RequestParams.XpackMlUpdateModelSnapshot>
      validate: ApiMethod<RequestParams.XpackMlValidate>
      validate_detector: ApiMethod<RequestParams.XpackMlValidateDetector>
      validateDetector: ApiMethod<RequestParams.XpackMlValidateDetector>
    }
    monitoring: {
      bulk: ApiMethod<RequestParams.XpackMonitoringBulk>
    }
    rollup: {
      delete_job: ApiMethod<RequestParams.XpackRollupDeleteJob>
      deleteJob: ApiMethod<RequestParams.XpackRollupDeleteJob>
      get_jobs: ApiMethod<RequestParams.XpackRollupGetJobs>
      getJobs: ApiMethod<RequestParams.XpackRollupGetJobs>
      get_rollup_caps: ApiMethod<RequestParams.XpackRollupGetRollupCaps>
      getRollupCaps: ApiMethod<RequestParams.XpackRollupGetRollupCaps>
      get_rollup_index_caps: ApiMethod<RequestParams.XpackRollupGetRollupIndexCaps>
      getRollupIndexCaps: ApiMethod<RequestParams.XpackRollupGetRollupIndexCaps>
      put_job: ApiMethod<RequestParams.XpackRollupPutJob>
      putJob: ApiMethod<RequestParams.XpackRollupPutJob>
      rollup_search: ApiMethod<RequestParams.XpackRollupRollupSearch>
      rollupSearch: ApiMethod<RequestParams.XpackRollupRollupSearch>
      start_job: ApiMethod<RequestParams.XpackRollupStartJob>
      startJob: ApiMethod<RequestParams.XpackRollupStartJob>
      stop_job: ApiMethod<RequestParams.XpackRollupStopJob>
      stopJob: ApiMethod<RequestParams.XpackRollupStopJob>
    }
    security: {
      authenticate: ApiMethod<RequestParams.XpackSecurityAuthenticate>
      change_password: ApiMethod<RequestParams.XpackSecurityChangePassword>
      changePassword: ApiMethod<RequestParams.XpackSecurityChangePassword>
      clear_cached_realms: ApiMethod<RequestParams.XpackSecurityClearCachedRealms>
      clearCachedRealms: ApiMethod<RequestParams.XpackSecurityClearCachedRealms>
      clear_cached_roles: ApiMethod<RequestParams.XpackSecurityClearCachedRoles>
      clearCachedRoles: ApiMethod<RequestParams.XpackSecurityClearCachedRoles>
      delete_privileges: ApiMethod<RequestParams.XpackSecurityDeletePrivileges>
      deletePrivileges: ApiMethod<RequestParams.XpackSecurityDeletePrivileges>
      delete_role: ApiMethod<RequestParams.XpackSecurityDeleteRole>
      deleteRole: ApiMethod<RequestParams.XpackSecurityDeleteRole>
      delete_role_mapping: ApiMethod<RequestParams.XpackSecurityDeleteRoleMapping>
      deleteRoleMapping: ApiMethod<RequestParams.XpackSecurityDeleteRoleMapping>
      delete_user: ApiMethod<RequestParams.XpackSecurityDeleteUser>
      deleteUser: ApiMethod<RequestParams.XpackSecurityDeleteUser>
      disable_user: ApiMethod<RequestParams.XpackSecurityDisableUser>
      disableUser: ApiMethod<RequestParams.XpackSecurityDisableUser>
      enable_user: ApiMethod<RequestParams.XpackSecurityEnableUser>
      enableUser: ApiMethod<RequestParams.XpackSecurityEnableUser>
      get_privileges: ApiMethod<RequestParams.XpackSecurityGetPrivileges>
      getPrivileges: ApiMethod<RequestParams.XpackSecurityGetPrivileges>
      get_role: ApiMethod<RequestParams.XpackSecurityGetRole>
      getRole: ApiMethod<RequestParams.XpackSecurityGetRole>
      get_role_mapping: ApiMethod<RequestParams.XpackSecurityGetRoleMapping>
      getRoleMapping: ApiMethod<RequestParams.XpackSecurityGetRoleMapping>
      get_token: ApiMethod<RequestParams.XpackSecurityGetToken>
      getToken: ApiMethod<RequestParams.XpackSecurityGetToken>
      get_user: ApiMethod<RequestParams.XpackSecurityGetUser>
      getUser: ApiMethod<RequestParams.XpackSecurityGetUser>
      get_user_privileges: ApiMethod<RequestParams.XpackSecurityGetUserPrivileges>
      getUserPrivileges: ApiMethod<RequestParams.XpackSecurityGetUserPrivileges>
      has_privileges: ApiMethod<RequestParams.XpackSecurityHasPrivileges>
      hasPrivileges: ApiMethod<RequestParams.XpackSecurityHasPrivileges>
      invalidate_token: ApiMethod<RequestParams.XpackSecurityInvalidateToken>
      invalidateToken: ApiMethod<RequestParams.XpackSecurityInvalidateToken>
      put_privileges: ApiMethod<RequestParams.XpackSecurityPutPrivileges>
      putPrivileges: ApiMethod<RequestParams.XpackSecurityPutPrivileges>
      put_role: ApiMethod<RequestParams.XpackSecurityPutRole>
      putRole: ApiMethod<RequestParams.XpackSecurityPutRole>
      put_role_mapping: ApiMethod<RequestParams.XpackSecurityPutRoleMapping>
      putRoleMapping: ApiMethod<RequestParams.XpackSecurityPutRoleMapping>
      put_user: ApiMethod<RequestParams.XpackSecurityPutUser>
      putUser: ApiMethod<RequestParams.XpackSecurityPutUser>
    }
    sql: {
      clear_cursor: ApiMethod<RequestParams.XpackSqlClearCursor>
      clearCursor: ApiMethod<RequestParams.XpackSqlClearCursor>
      query: ApiMethod<RequestParams.XpackSqlQuery>
      translate: ApiMethod<RequestParams.XpackSqlTranslate>
    }
    ssl: {
      certificates: ApiMethod<RequestParams.XpackSslCertificates>
    }
    usage: ApiMethod<RequestParams.XpackUsage>
    watcher: {
      ack_watch: ApiMethod<RequestParams.XpackWatcherAckWatch>
      ackWatch: ApiMethod<RequestParams.XpackWatcherAckWatch>
      activate_watch: ApiMethod<RequestParams.XpackWatcherActivateWatch>
      activateWatch: ApiMethod<RequestParams.XpackWatcherActivateWatch>
      deactivate_watch: ApiMethod<RequestParams.XpackWatcherDeactivateWatch>
      deactivateWatch: ApiMethod<RequestParams.XpackWatcherDeactivateWatch>
      delete_watch: ApiMethod<RequestParams.XpackWatcherDeleteWatch>
      deleteWatch: ApiMethod<RequestParams.XpackWatcherDeleteWatch>
      execute_watch: ApiMethod<RequestParams.XpackWatcherExecuteWatch>
      executeWatch: ApiMethod<RequestParams.XpackWatcherExecuteWatch>
      get_watch: ApiMethod<RequestParams.XpackWatcherGetWatch>
      getWatch: ApiMethod<RequestParams.XpackWatcherGetWatch>
      put_watch: ApiMethod<RequestParams.XpackWatcherPutWatch>
      putWatch: ApiMethod<RequestParams.XpackWatcherPutWatch>
      restart: ApiMethod<RequestParams.XpackWatcherRestart>
      start: ApiMethod<RequestParams.XpackWatcherStart>
      stats: ApiMethod<RequestParams.XpackWatcherStats>
      stop: ApiMethod<RequestParams.XpackWatcherStop>
    }
  }
  /* /GENERATED */
}

declare const events: {

@@ -588,5 +608,6 @@ export {
  ResurrectEvent,
  RequestParams,
  ClientOptions,
  NodeOptions,
  ClientExtendsCallbackOptions
};

60 index.js
@@ -20,10 +20,11 @@
'use strict'

const { EventEmitter } = require('events')
const { URL } = require('url')
const debug = require('debug')('elasticsearch')
const Transport = require('./lib/Transport')
const Connection = require('./lib/Connection')
const ConnectionPool = require('./lib/ConnectionPool')
const { ConnectionPool, CloudConnectionPool } = require('./lib/pool')
const Serializer = require('./lib/Serializer')
const errors = require('./lib/errors')
const { ConfigurationError } = errors

@@ -43,7 +44,12 @@ class Client extends EventEmitter {
      // the url is a string divided by two '$', the first is the cloud url
      // the second the elasticsearch instance, the third the kibana instance
      const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$')
      opts.node = `https://${username}:${password}@${cloudUrls[1]}.${cloudUrls[0]}`

      // TODO: remove username and password here in 8
      if (username && password) {
        opts.auth = Object.assign({}, opts.auth, { username, password })
      }
      opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}`

      // Cloud has better performances with compression enabled
      // see https://github.com/elastic/elasticsearch-py/pull/704.

@@ -61,11 +67,16 @@ class Client extends EventEmitter {
      throw new ConfigurationError('Missing node(s) option')
    }

    const checkAuth = getAuth(opts.node || opts.nodes)
    if (checkAuth && checkAuth.username && checkAuth.password) {
      opts.auth = Object.assign({}, opts.auth, { username: checkAuth.username, password: checkAuth.password })
    }

    const options = Object.assign({}, {
      Connection,
      ConnectionPool,
      Transport,
      Serializer,
      ConnectionPool: opts.cloud ? CloudConnectionPool : ConnectionPool,
      maxRetries: 3,
      requestTimeout: 30000,
      pingTimeout: 3000,

@@ -82,7 +93,9 @@ class Client extends EventEmitter {
      nodeFilter: null,
      nodeSelector: 'round-robin',
      generateRequestId: null,
      name: 'elasticsearch-js'
      name: 'elasticsearch-js',
      auth: null,
      opaqueIdPrefix: null
    }, opts)

    this[kInitialOptions] = options

@@ -96,6 +109,7 @@ class Client extends EventEmitter {
      ssl: options.ssl,
      agent: options.agent,
      Connection: options.Connection,
      auth: options.auth,
      emit: this.emit.bind(this),
      sniffEnabled: options.sniffInterval !== false ||
        options.sniffOnStart !== false ||

@@ -123,7 +137,8 @@ class Client extends EventEmitter {
      nodeFilter: options.nodeFilter,
      nodeSelector: options.nodeSelector,
      generateRequestId: options.generateRequestId,
      name: options.name
      name: options.name,
      opaqueIdPrefix: options.opaqueIdPrefix
    })

    const apis = buildApi({

@@ -209,6 +224,41 @@ class Client extends EventEmitter {
    }
  }

function getAuth (node) {
  if (Array.isArray(node)) {
    for (const url of node) {
      const auth = getUsernameAndPassword(url)
      if (auth.username !== '' && auth.password !== '') {
        return auth
      }
    }

    return null
  }

  const auth = getUsernameAndPassword(node)
  if (auth.username !== '' && auth.password !== '') {
    return auth
  }

  return null

  function getUsernameAndPassword (node) {
    if (typeof node === 'string') {
      const { username, password } = new URL(node)
      return {
        username: decodeURIComponent(username),
        password: decodeURIComponent(password)
      }
    } else if (node.url instanceof URL) {
      return {
        username: decodeURIComponent(node.url.username),
        password: decodeURIComponent(node.url.password)
      }
    }
  }
}
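
// Illustrative sketch only (not part of the diff): what getAuth extracts.
// getAuth('https://user:pass@localhost:9200')        // => { username: 'user', password: 'pass' }
// getAuth({ url: new URL('https://a:b@host:9200') }) // => { username: 'a', password: 'b' }
// getAuth('https://localhost:9200')                  // => null (no credentials in the url)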

const events = {
  RESPONSE: 'response',
  REQUEST: 'request',

10 lib/Connection.d.ts vendored
@@ -21,23 +21,27 @@

import { URL } from 'url';
import { inspect, InspectOptions } from 'util';
import { ApiKeyAuth, BasicAuth } from './pool'
import * as http from 'http';
import { SecureContextOptions } from 'tls';
import { ConnectionOptions as TlsConnectionOptions } from 'tls';

export declare type agentFn = () => any;

interface ConnectionOptions {
  url: URL;
  ssl?: SecureContextOptions;
  ssl?: TlsConnectionOptions;
  id?: string;
  headers?: any;
  agent?: AgentOptions | agentFn;
  status?: string;
  roles?: any;
  auth?: BasicAuth | ApiKeyAuth;
}

interface RequestOptions extends http.ClientRequestArgs {
  asStream?: boolean;
  body?: any;
  querystring?: string;
}

export interface AgentOptions {

@@ -59,7 +63,7 @@ export default class Connection {
    ML: string;
  };
  url: URL;
  ssl: SecureContextOptions | null;
  ssl: TlsConnectionOptions | null;
  id: string;
  headers: any;
  deadCount: number;

@ -34,8 +34,7 @@ class Connection {
|
||||
this.url = opts.url
|
||||
this.ssl = opts.ssl || null
|
||||
this.id = opts.id || stripAuth(opts.url.href)
|
||||
this.headers = opts.headers || null
|
||||
this.auth = opts.auth || { username: null, password: null }
|
||||
this.headers = prepareHeaders(opts.headers, opts.auth)
|
||||
this.deadCount = 0
|
||||
this.resurrectTimeout = 0
|
||||
|
||||
@@ -181,7 +180,6 @@ class Connection {
|
||||
|
||||
buildRequestObject (params) {
|
||||
const url = this.url
|
||||
const { username, password } = this.auth
|
||||
const request = {
|
||||
protocol: url.protocol,
|
||||
hostname: url.hostname[0] === '['
|
||||
@@ -196,9 +194,6 @@ class Connection {
|
||||
// https://github.com/elastic/elasticsearch-js/issues/843
|
||||
port: url.port !== '' ? url.port : undefined,
|
||||
headers: this.headers,
|
||||
auth: username != null && password != null
|
||||
? `${username}:${password}`
|
||||
: undefined,
|
||||
agent: this.agent
|
||||
}
|
||||
|
||||
@@ -230,10 +225,15 @@ class Connection {
|
||||
// the logs very hard to read. The user can still
|
||||
// access them with `instance.agent` and `instance.ssl`.
|
||||
[inspect.custom] (depth, options) {
|
||||
const {
|
||||
authorization,
|
||||
...headers
|
||||
} = this.headers
|
||||
|
||||
return {
|
||||
url: this.url,
|
||||
url: stripAuth(this.url.toString()),
|
||||
id: this.id,
|
||||
headers: this.headers,
|
||||
headers,
|
||||
deadCount: this.deadCount,
|
||||
resurrectTimeout: this.resurrectTimeout,
|
||||
_openRequests: this._openRequests,
|
||||
@@ -243,10 +243,15 @@ class Connection {
|
||||
}
|
||||
|
||||
toJSON () {
|
||||
const {
|
||||
authorization,
|
||||
...headers
|
||||
} = this.headers
|
||||
|
||||
return {
|
||||
url: this.url,
|
||||
url: stripAuth(this.url.toString()),
|
||||
id: this.id,
|
||||
headers: this.headers,
|
||||
headers,
|
||||
deadCount: this.deadCount,
|
||||
resurrectTimeout: this.resurrectTimeout,
|
||||
_openRequests: this._openRequests,
|
||||
@@ -302,4 +307,19 @@ function resolve (host, path) {
|
||||
}
|
||||
}
|
||||
|
||||
function prepareHeaders (headers = {}, auth) {
|
||||
if (auth != null && headers.authorization == null) {
|
||||
if (auth.apiKey) {
|
||||
if (typeof auth.apiKey === 'object') {
|
||||
headers.authorization = 'ApiKey ' + Buffer.from(`${auth.apiKey.id}:${auth.apiKey.api_key}`).toString('base64')
|
||||
} else {
|
||||
headers.authorization = `ApiKey ${auth.apiKey}`
|
||||
}
|
||||
} else if (auth.username && auth.password) {
|
||||
headers.authorization = 'Basic ' + Buffer.from(`${auth.username}:${auth.password}`).toString('base64')
|
||||
}
|
||||
}
|
||||
return headers
|
||||
}
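// Worked example (illustrative credentials): with basic auth the header is
//   'Basic ' + Buffer.from('elastic:changeme').toString('base64')
//   === 'Basic ZWxhc3RpYzpjaGFuZ2VtZQ=='
// while the { id, api_key } form yields 'ApiKey ' + base64(`${id}:${api_key}`).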
|
||||
|
||||
module.exports = Connection
|
||||
|
||||
@@ -1,410 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const { URL } = require('url')
|
||||
const debug = require('debug')('elasticsearch')
|
||||
const Connection = require('./Connection')
|
||||
const noop = () => {}
|
||||
|
||||
class ConnectionPool {
|
||||
constructor (opts = {}) {
|
||||
this.connections = new Map()
|
||||
this.dead = []
|
||||
this.selector = opts.selector
|
||||
this._auth = null
|
||||
this._ssl = opts.ssl
|
||||
this._agent = opts.agent
|
||||
// the resurrect timeout is 60s
|
||||
this.resurrectTimeout = 1000 * 60
|
||||
// number of consecutive failures after which
|
||||
// the timeout doesn't increase
|
||||
this.resurrectTimeoutCutoff = 5
|
||||
this.pingTimeout = opts.pingTimeout
|
||||
this.Connection = opts.Connection
|
||||
this.emit = opts.emit || noop
|
||||
this._sniffEnabled = opts.sniffEnabled || false
|
||||
|
||||
const resurrectStrategy = opts.resurrectStrategy || 'ping'
|
||||
this.resurrectStrategy = ConnectionPool.resurrectStrategies[resurrectStrategy]
|
||||
assert(
|
||||
this.resurrectStrategy != null,
|
||||
`Invalid resurrection strategy: '${resurrectStrategy}'`
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks a connection as 'alive'.
|
||||
* If needed removes the connection from the dead list
|
||||
* and then resets the `deadCount`.
|
||||
* If sniffing is not enabled and there is only
|
||||
* one node, this method is a noop.
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markAlive (connection) {
|
||||
if (this._sniffEnabled === false && this.connections.size === 1) return
|
||||
const { id } = connection
|
||||
debug(`Marking as 'alive' connection '${id}'`)
|
||||
const index = this.dead.indexOf(id)
|
||||
if (index > -1) this.dead.splice(index, 1)
|
||||
connection.status = Connection.statuses.ALIVE
|
||||
connection.deadCount = 0
|
||||
connection.resurrectTimeout = 0
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks a connection as 'dead'.
|
||||
* If needed adds the connection to the dead list
|
||||
* and then increments the `deadCount`.
|
||||
* If sniffing is not enabled and there is only
|
||||
* one node, this method is a noop.
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markDead (connection) {
|
||||
if (this._sniffEnabled === false && this.connections.size === 1) return
|
||||
const { id } = connection
|
||||
debug(`Marking as 'dead' connection '${id}'`)
|
||||
if (this.dead.indexOf(id) === -1) {
|
||||
this.dead.push(id)
|
||||
}
|
||||
connection.status = Connection.statuses.DEAD
|
||||
connection.deadCount++
|
||||
// resurrectTimeout formula:
|
||||
// `resurrectTimeout * 2 ** min(deadCount - 1, resurrectTimeoutCutoff)`
|
||||
connection.resurrectTimeout = Date.now() + this.resurrectTimeout * Math.pow(
|
||||
2, Math.min(connection.deadCount - 1, this.resurrectTimeoutCutoff)
|
||||
)
|
||||
|
||||
// sort the dead list in ascending order
|
||||
// based on the resurrectTimeout
|
||||
this.dead.sort((a, b) => {
|
||||
const conn1 = this.connections.get(a)
|
||||
const conn2 = this.connections.get(b)
|
||||
return conn1.resurrectTimeout - conn2.resurrectTimeout
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* If enabled, tries to resurrect a connection with the given
|
||||
* resurrect strategy ('ping', 'optimistic', 'none').
|
||||
*
|
||||
* @param {object} { now, requestId }
|
||||
* @param {function} callback (isAlive, connection)
|
||||
*/
|
||||
resurrect (opts, callback = noop) {
|
||||
if (this.resurrectStrategy === 0 || this.dead.length === 0) {
|
||||
debug('Nothing to resurrect')
|
||||
callback(null, null)
|
||||
return
|
||||
}
|
||||
|
||||
// the dead list is sorted in ascending order based on the timeout
|
||||
// so the first element will always be the one with the smallest timeout
|
||||
const connection = this.connections.get(this.dead[0])
|
||||
if ((opts.now || Date.now()) < connection.resurrectTimeout) {
|
||||
debug('Nothing to resurrect')
|
||||
callback(null, null)
|
||||
return
|
||||
}
|
||||
|
||||
const { id } = connection
|
||||
|
||||
// ping strategy
|
||||
if (this.resurrectStrategy === 1) {
|
||||
connection.request({
|
||||
method: 'HEAD',
|
||||
path: '/',
|
||||
timeout: this.pingTimeout
|
||||
}, (err, response) => {
|
||||
var isAlive = true
|
||||
const statusCode = response !== null ? response.statusCode : 0
|
||||
if (err != null ||
|
||||
(statusCode === 502 || statusCode === 503 || statusCode === 504)) {
|
||||
debug(`Resurrect: connection '${id}' is still dead`)
|
||||
this.markDead(connection)
|
||||
isAlive = false
|
||||
} else {
|
||||
debug(`Resurrect: connection '${id}' is now alive`)
|
||||
this.markAlive(connection)
|
||||
}
|
||||
this.emit('resurrect', null, {
|
||||
strategy: 'ping',
|
||||
name: opts.name,
|
||||
request: { id: opts.requestId },
|
||||
isAlive,
|
||||
connection
|
||||
})
|
||||
callback(isAlive, connection)
|
||||
})
|
||||
// optimistic strategy
|
||||
} else {
|
||||
debug(`Resurrect: optimistic resurrection for connection '${id}'`)
|
||||
this.dead.splice(this.dead.indexOf(id), 1)
|
||||
connection.status = Connection.statuses.ALIVE
|
||||
this.emit('resurrect', null, {
|
||||
strategy: 'optimistic',
|
||||
name: opts.name,
|
||||
request: { id: opts.requestId },
|
||||
isAlive: true,
|
||||
connection
|
||||
})
|
||||
// eslint-disable-next-line standard/no-callback-literal
|
||||
callback(true, connection)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an alive connection if present,
|
||||
* otherwise returns null.
|
||||
* By default it filters out the `master`-only nodes.
* It uses the selector to choose which
* connection to return.
|
||||
*
|
||||
* @param {object} options (filter and selector)
|
||||
* @returns {object|null} connection
|
||||
*/
|
||||
getConnection (opts = {}) {
|
||||
const filter = opts.filter || (() => true)
|
||||
const selector = opts.selector || (c => c[0])
|
||||
|
||||
// TODO: can we cache this?
|
||||
const connections = []
|
||||
for (var connection of this.connections.values()) {
|
||||
if (connection.status === Connection.statuses.ALIVE) {
|
||||
if (filter(connection) === true) {
|
||||
connections.push(connection)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (connections.length === 0) return null
|
||||
|
||||
return selector(connections)
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a new connection to the pool.
|
||||
*
|
||||
* @param {object|string} host
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
addConnection (opts) {
|
||||
if (Array.isArray(opts)) {
|
||||
opts.forEach(o => this.addConnection(o))
|
||||
return
|
||||
}
|
||||
|
||||
if (typeof opts === 'string') {
|
||||
opts = this.urlToHost(opts)
|
||||
}
|
||||
// if a given node has auth data we store it in the connection pool,
|
||||
// so if we add new nodes without auth data (after a sniff for example)
|
||||
// we can add it to them once the connection instance has been created
|
||||
if (opts.url.username !== '' && opts.url.password !== '') {
|
||||
this._auth = {
|
||||
username: decodeURIComponent(opts.url.username),
|
||||
password: decodeURIComponent(opts.url.password)
|
||||
}
|
||||
opts.auth = this._auth
|
||||
}
|
||||
|
||||
if (this._auth != null) {
|
||||
if (opts.auth == null || (opts.auth.username == null && opts.auth.password == null)) {
|
||||
opts.auth = this._auth
|
||||
opts.url.username = this._auth.username
|
||||
opts.url.password = this._auth.password
|
||||
}
|
||||
}
|
||||
|
||||
if (opts.ssl == null) opts.ssl = this._ssl
|
||||
if (opts.agent == null) opts.agent = this._agent
|
||||
|
||||
const connection = new this.Connection(opts)
|
||||
debug('Adding a new connection', connection)
|
||||
if (this.connections.has(connection.id)) {
|
||||
throw new Error(`Connection with id '${connection.id}' is already present`)
|
||||
}
|
||||
this.connections.set(connection.id, connection)
|
||||
return connection
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a connection from the pool.
|
||||
*
|
||||
* @param {object} connection
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
removeConnection (connection) {
|
||||
debug('Removing connection', connection)
|
||||
connection.close(noop)
|
||||
const { id } = connection
|
||||
this.connections.delete(id)
|
||||
var index = this.dead.indexOf(id)
|
||||
if (index > -1) this.dead.splice(index, 1)
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Empties the connection pool.
|
||||
*
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
empty (callback) {
|
||||
debug('Emptying the connection pool')
|
||||
var openConnections = this.connections.size
|
||||
this.connections.forEach(connection => {
|
||||
connection.close(() => {
|
||||
if (--openConnections === 0) {
|
||||
this.connections = new Map()
|
||||
this.dead = []
|
||||
callback()
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the ConnectionPool with new connections.
|
||||
*
|
||||
* @param {array} array of connections
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
update (connections) {
|
||||
debug('Updating the connection pool')
|
||||
for (var i = 0; i < connections.length; i++) {
|
||||
const connection = connections[i]
|
||||
// if we already have a given connection in the pool
|
||||
// we check its status: if it is 'alive', we do nothing;
// if 'dead', we mark it as alive and do not close the old
// one, to avoid socket issues
|
||||
if (this.connections.has(connection.id) === true) {
|
||||
debug(`The connection with id '${connection.id}' is already present`)
|
||||
const oldConnection = this.connections.get(connection.id)
|
||||
if (oldConnection.status === Connection.statuses.DEAD) {
|
||||
this.markAlive(oldConnection)
|
||||
}
|
||||
// in case the user has passed a single url (or an array of urls),
|
||||
// the connection id will be the full href; to avoid closing valid connections
|
||||
// because they are not present in the pool, we also check the node url,
// and if it is already present we update its id with the ES-provided one.
|
||||
} else if (this.connections.has(connection.url.href) === true) {
|
||||
const oldConnection = this.connections.get(connection.url.href)
|
||||
this.connections.delete(connection.url.href)
|
||||
oldConnection.id = connection.id
|
||||
this.connections.set(connection.id, oldConnection)
|
||||
if (oldConnection.status === Connection.statuses.DEAD) {
|
||||
this.markAlive(oldConnection)
|
||||
}
|
||||
} else {
|
||||
this.addConnection(connection)
|
||||
}
|
||||
}
|
||||
|
||||
const ids = connections.map(c => c.id)
|
||||
// remove all the dead connections and old connections
|
||||
for (const connection of this.connections.values()) {
|
||||
if (ids.indexOf(connection.id) === -1) {
|
||||
this.removeConnection(connection)
|
||||
}
|
||||
}
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms the nodes objects to a host object.
|
||||
*
|
||||
* @param {object} nodes
|
||||
* @returns {array} hosts
|
||||
*/
|
||||
nodesToHost (nodes, protocol) {
|
||||
const ids = Object.keys(nodes)
|
||||
const hosts = []
|
||||
|
||||
for (var i = 0, len = ids.length; i < len; i++) {
|
||||
const node = nodes[ids[i]]
|
||||
// If there is no protocol in
|
||||
// the `publish_address` new URL will throw
|
||||
// the publish_address can have two forms:
|
||||
// - ip:port
|
||||
// - hostname/ip:port
|
||||
// if we encounter the second case, we should
|
||||
// use the hostname instead of the ip
|
||||
var address = node.http.publish_address
|
||||
const parts = address.split('/')
|
||||
// the url is in the form of hostname/ip:port
|
||||
if (parts.length > 1) {
|
||||
const hostname = parts[0]
|
||||
const port = parts[1].match(/((?::))(?:[0-9]+)$/g)[0].slice(1)
|
||||
address = `${hostname}:${port}`
|
||||
}
|
||||
|
||||
address = address.slice(0, 4) === 'http'
|
||||
? address
|
||||
: `${protocol}//${address}`
|
||||
const roles = node.roles.reduce((acc, role) => {
|
||||
acc[role] = true
|
||||
return acc
|
||||
}, {})
|
||||
|
||||
hosts.push({
|
||||
url: new URL(address),
|
||||
id: ids[i],
|
||||
roles: Object.assign({
|
||||
[Connection.roles.MASTER]: true,
|
||||
[Connection.roles.DATA]: true,
|
||||
[Connection.roles.INGEST]: true,
|
||||
[Connection.roles.ML]: false
|
||||
}, roles)
|
||||
})
|
||||
}
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an url string to a host object
|
||||
*
|
||||
* @param {string} url
|
||||
* @returns {object} host
|
||||
*/
|
||||
urlToHost (url) {
|
||||
return {
|
||||
url: new URL(url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ConnectionPool.resurrectStrategies = {
|
||||
none: 0,
|
||||
ping: 1,
|
||||
optimistic: 2
|
||||
}
|
||||
|
||||
// https://gist.github.com/guilhermepontes/17ae0cc71fa2b13ea8c20c94c5c35dc4
|
||||
// const shuffleArray = arr => arr
|
||||
// .map(a => [Math.random(), a])
|
||||
// .sort((a, b) => a[0] - b[0])
|
||||
// .map(a => a[1])
|
||||
|
||||
module.exports = ConnectionPool
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
const { stringify } = require('querystring')
|
||||
const debug = require('debug')('elasticsearch')
|
||||
const sjson = require('secure-json-parse')
|
||||
const { SerializationError, DeserializationError } = require('./errors')
|
||||
|
||||
class Serializer {
|
||||
@@ -37,7 +38,7 @@ class Serializer {
|
||||
deserialize (json) {
|
||||
debug('Deserializing', json)
|
||||
try {
|
||||
var object = JSON.parse(json)
|
||||
var object = sjson.parse(json)
|
||||
} catch (err) {
|
||||
throw new DeserializationError(err.message)
|
||||
}
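// Sketch of why sjson replaces JSON.parse here: secure-json-parse behaves
// like JSON.parse but by default rejects prototype-poisoning payloads
// (assuming its default protoAction):
// sjson.parse('{"a":1}')                          // -> { a: 1 }
// sjson.parse('{"__proto__":{"polluted":true}}')  // -> throws SyntaxError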
|
||||
|
||||
11 lib/Transport.d.ts vendored
@@ -17,7 +17,7 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import ConnectionPool from './ConnectionPool';
|
||||
import { ConnectionPool, CloudConnectionPool } from './pool';
|
||||
import Connection from './Connection';
|
||||
import Serializer from './Serializer';
|
||||
|
||||
@@ -38,7 +38,7 @@ declare type emitFn = (event: string | symbol, ...args: any[]) => boolean;
|
||||
|
||||
interface TransportOptions {
|
||||
emit: emitFn & noopFn;
|
||||
connectionPool: ConnectionPool;
|
||||
connectionPool: ConnectionPool | CloudConnectionPool;
|
||||
serializer: Serializer;
|
||||
maxRetries: number;
|
||||
requestTimeout: number | string;
|
||||
@@ -53,6 +53,7 @@ interface TransportOptions {
|
||||
headers?: anyObject;
|
||||
generateRequestId?: generateRequestIdFn;
|
||||
name: string;
|
||||
opaqueIdPrefix?: string;
|
||||
}
|
||||
|
||||
export interface RequestEvent<T = any, C = any> {
|
||||
@@ -95,7 +96,7 @@ export interface TransportRequestParams {
|
||||
}
|
||||
|
||||
export interface TransportRequestOptions {
|
||||
ignore?: [number];
|
||||
ignore?: number[];
|
||||
requestTimeout?: number | string;
|
||||
maxRetries?: number;
|
||||
asStream?: boolean;
|
||||
@@ -105,6 +106,7 @@ export interface TransportRequestOptions {
|
||||
id?: any;
|
||||
context?: any;
|
||||
warnings?: [string];
|
||||
opaqueId?: string;
|
||||
}
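// e.g. the corrected ignore type accepts any list of status codes
// (hypothetical call): client.indices.delete({ index: 'test' }, { ignore: [404] })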
|
||||
|
||||
export interface TransportRequestCallback {
|
||||
@@ -128,7 +130,7 @@ export default class Transport {
|
||||
DEFAULT: string;
|
||||
};
|
||||
emit: emitFn & noopFn;
|
||||
connectionPool: ConnectionPool;
|
||||
connectionPool: ConnectionPool | CloudConnectionPool;
|
||||
serializer: Serializer;
|
||||
maxRetries: number;
|
||||
requestTimeout: number;
|
||||
@@ -136,6 +138,7 @@ export default class Transport {
|
||||
compression: 'gzip' | false;
|
||||
sniffInterval: number;
|
||||
sniffOnConnectionFault: boolean;
|
||||
opaqueIdPrefix: string | null;
|
||||
sniffEndpoint: string;
|
||||
_sniffEnabled: boolean;
|
||||
_nextSniff: number;
|
||||
|
||||
@@ -56,6 +56,7 @@ class Transport {
|
||||
this.sniffEndpoint = opts.sniffEndpoint
|
||||
this.generateRequestId = opts.generateRequestId || generateRequestId()
|
||||
this.name = opts.name
|
||||
this.opaqueIdPrefix = opts.opaqueIdPrefix
|
||||
|
||||
this.nodeFilter = opts.nodeFilter || defaultNodeFilter
|
||||
if (typeof opts.nodeSelector === 'function') {
|
||||
@@ -122,13 +123,18 @@ class Transport {
|
||||
const makeRequest = () => {
|
||||
if (meta.aborted === true) return
|
||||
meta.connection = this.getConnection({ requestId: meta.request.id })
|
||||
if (meta.connection === null) {
|
||||
return callback(new NoLivingConnectionsError('There are not living connections'), result)
|
||||
if (meta.connection == null) {
|
||||
return callback(new NoLivingConnectionsError(), result)
|
||||
}
|
||||
|
||||
// TODO: make this assignment FAST
|
||||
const headers = Object.assign({}, this.headers, options.headers)
|
||||
|
||||
if (options.opaqueId !== undefined) {
|
||||
headers['X-Opaque-Id'] = this.opaqueIdPrefix !== null
|
||||
? this.opaqueIdPrefix + options.opaqueId
|
||||
: options.opaqueId
|
||||
}
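// e.g. (assumed values) with opaqueIdPrefix: 'proxy-' a request made with
// { opaqueId: 'user-123' } carries the header 'X-Opaque-Id: proxy-user-123'.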
|
||||
|
||||
// handle json body
|
||||
if (params.body != null) {
|
||||
if (shouldSerialize(params.body) === true) {
|
||||
@@ -138,15 +144,17 @@ class Transport {
|
||||
return callback(err, result)
|
||||
}
|
||||
}
|
||||
headers['Content-Type'] = headers['Content-Type'] || 'application/json'
|
||||
|
||||
if (compression === 'gzip') {
|
||||
if (isStream(params.body) === false) {
|
||||
params.body = intoStream(params.body).pipe(createGzip())
|
||||
} else {
|
||||
params.body = params.body.pipe(createGzip())
|
||||
if (params.body !== '') {
|
||||
headers['Content-Type'] = headers['Content-Type'] || 'application/json'
|
||||
if (compression === 'gzip') {
|
||||
if (isStream(params.body) === false) {
|
||||
params.body = intoStream(params.body).pipe(createGzip())
|
||||
} else {
|
||||
params.body = params.body.pipe(createGzip())
|
||||
}
|
||||
headers['Content-Encoding'] = compression
|
||||
}
|
||||
headers['Content-Encoding'] = compression
|
||||
}
|
||||
|
||||
if (isStream(params.body) === false) {
|
||||
@@ -329,10 +337,12 @@ class Transport {
|
||||
if (this._sniffEnabled === true && now > this._nextSniff) {
|
||||
this.sniff({ reason: Transport.sniffReasons.SNIFF_INTERVAL, requestId: opts.requestId })
|
||||
}
|
||||
this.connectionPool.resurrect({ now, requestId: opts.requestId, name: this.name })
|
||||
return this.connectionPool.getConnection({
|
||||
filter: this.nodeFilter,
|
||||
selector: this.nodeSelector
|
||||
selector: this.nodeSelector,
|
||||
requestId: opts.requestId,
|
||||
name: this.name,
|
||||
now
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ class NoLivingConnectionsError extends ElasticsearchClientError {
|
||||
super(message)
|
||||
Error.captureStackTrace(this, NoLivingConnectionsError)
|
||||
this.name = 'NoLivingConnectionsError'
|
||||
this.message = message || 'No Living Connections Error'
|
||||
this.message = message || 'Given the configuration, the ConnectionPool was not able to find a usable Connection for this request.'
|
||||
this.meta = meta
|
||||
}
|
||||
}
|
||||
|
||||
254 lib/pool/BaseConnectionPool.js Normal file
@@ -0,0 +1,254 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const { URL } = require('url')
|
||||
const debug = require('debug')('elasticsearch')
|
||||
const Connection = require('../Connection')
|
||||
const noop = () => {}
|
||||
|
||||
class BaseConnectionPool {
|
||||
constructor (opts) {
|
||||
// list of nodes and weights
|
||||
this.connections = []
|
||||
// how many nodes we have in our scheduler
|
||||
this.size = this.connections.length
|
||||
this.Connection = opts.Connection
|
||||
this.emit = opts.emit || noop
|
||||
this.auth = opts.auth || null
|
||||
this._ssl = opts.ssl
|
||||
this._agent = opts.agent
|
||||
}
|
||||
|
||||
getConnection () {
|
||||
throw new Error('getConnection must be implemented')
|
||||
}
|
||||
|
||||
markAlive () {
|
||||
return this
|
||||
}
|
||||
|
||||
markDead () {
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new connection instance.
|
||||
*/
|
||||
createConnection (opts) {
|
||||
if (typeof opts === 'string') {
|
||||
opts = this.urlToHost(opts)
|
||||
}
|
||||
|
||||
if (this.auth !== null) {
|
||||
opts.auth = this.auth
|
||||
} else if (opts.url.username !== '' && opts.url.password !== '') {
|
||||
opts.auth = {
|
||||
username: decodeURIComponent(opts.url.username),
|
||||
password: decodeURIComponent(opts.url.password)
|
||||
}
|
||||
}
|
||||
|
||||
if (opts.ssl == null) opts.ssl = this._ssl
|
||||
if (opts.agent == null) opts.agent = this._agent
|
||||
|
||||
const connection = new this.Connection(opts)
|
||||
|
||||
for (const conn of this.connections) {
|
||||
if (conn.id === connection.id) {
|
||||
throw new Error(`Connection with id '${connection.id}' is already present`)
|
||||
}
|
||||
}
|
||||
|
||||
return connection
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a new connection to the pool.
|
||||
*
|
||||
* @param {object|string} host
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
addConnection (opts) {
|
||||
if (Array.isArray(opts)) {
|
||||
return opts.forEach(o => this.addConnection(o))
|
||||
}
|
||||
|
||||
if (typeof opts === 'string') {
|
||||
opts = this.urlToHost(opts)
|
||||
}
|
||||
|
||||
const connectionById = this.connections.find(c => c.id === opts.id)
|
||||
const connectionByUrl = this.connections.find(c => c.id === opts.url.href)
|
||||
|
||||
if (connectionById || connectionByUrl) {
|
||||
throw new Error(`Connection with id '${opts.id || opts.url.href}' is already present`)
|
||||
}
|
||||
|
||||
this.update([...this.connections, opts])
|
||||
return this.connections[this.size - 1]
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a connection from the pool.
|
||||
*
|
||||
* @param {object} connection
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
removeConnection (connection) {
|
||||
debug('Removing connection', connection)
|
||||
return this.update(this.connections.filter(c => c.id !== connection.id))
|
||||
}
|
||||
|
||||
/**
|
||||
* Empties the connection pool.
|
||||
*
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
empty (callback) {
|
||||
debug('Emptying the connection pool')
|
||||
var openConnections = this.size
|
||||
this.connections.forEach(connection => {
|
||||
connection.close(() => {
|
||||
if (--openConnections === 0) {
|
||||
this.connections = []
|
||||
this.size = this.connections.length
|
||||
callback()
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the ConnectionPool with new connections.
|
||||
*
|
||||
* @param {array} array of connections
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
update (nodes) {
|
||||
debug('Updating the connection pool')
|
||||
const newConnections = []
|
||||
const oldConnections = []
|
||||
|
||||
for (const node of nodes) {
|
||||
// if we already have a given connection in the pool
|
||||
// we mark it as alive and do not close the connection
|
||||
// to avoid socket issues
|
||||
const connectionById = this.connections.find(c => c.id === node.id)
|
||||
const connectionByUrl = this.connections.find(c => c.id === node.url.href)
|
||||
if (connectionById) {
|
||||
debug(`The connection with id '${node.id}' is already present`)
|
||||
this.markAlive(connectionById)
|
||||
newConnections.push(connectionById)
|
||||
// in case the user has passed a single url (or an array of urls),
|
||||
// the connection id will be the full href; to avoid closing valid connections
|
||||
// because they are not present in the pool, we also check the node url,
// and if it is already present we update its id with the ES-provided one.
|
||||
} else if (connectionByUrl) {
|
||||
connectionByUrl.id = node.id
|
||||
this.markAlive(connectionByUrl)
|
||||
newConnections.push(connectionByUrl)
|
||||
} else {
|
||||
newConnections.push(this.createConnection(node))
|
||||
}
|
||||
}
|
||||
|
||||
const ids = nodes.map(c => c.id)
|
||||
// remove all the dead connections and old connections
|
||||
for (const connection of this.connections) {
|
||||
if (ids.indexOf(connection.id) === -1) {
|
||||
oldConnections.push(connection)
|
||||
}
|
||||
}
|
||||
|
||||
// close old connections
|
||||
oldConnections.forEach(connection => connection.close())
|
||||
|
||||
this.connections = newConnections
|
||||
this.size = this.connections.length
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms the nodes objects to a host object.
|
||||
*
|
||||
* @param {object} nodes
|
||||
* @returns {array} hosts
|
||||
*/
|
||||
nodesToHost (nodes, protocol) {
|
||||
const ids = Object.keys(nodes)
|
||||
const hosts = []
|
||||
|
||||
for (var i = 0, len = ids.length; i < len; i++) {
|
||||
const node = nodes[ids[i]]
|
||||
// If there is no protocol in
|
||||
// the `publish_address` new URL will throw
|
||||
// the publish_address can have two forms:
|
||||
// - ip:port
|
||||
// - hostname/ip:port
|
||||
// if we encounter the second case, we should
|
||||
// use the hostname instead of the ip
|
||||
var address = node.http.publish_address
|
||||
const parts = address.split('/')
|
||||
// the url is in the form of hostname/ip:port
|
||||
if (parts.length > 1) {
|
||||
const hostname = parts[0]
|
||||
const port = parts[1].match(/((?::))(?:[0-9]+)$/g)[0].slice(1)
|
||||
address = `${hostname}:${port}`
|
||||
}
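// e.g. a publish_address of 'example.com/10.0.0.1:9200' becomes
// 'example.com:9200', while a plain '10.0.0.1:9200' is kept as-is
// and only prefixed with the protocol below.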
|
||||
|
||||
address = address.slice(0, 4) === 'http'
|
||||
? address
|
||||
: `${protocol}//${address}`
|
||||
const roles = node.roles.reduce((acc, role) => {
|
||||
acc[role] = true
|
||||
return acc
|
||||
}, {})
|
||||
|
||||
hosts.push({
|
||||
url: new URL(address),
|
||||
id: ids[i],
|
||||
roles: Object.assign({
|
||||
[Connection.roles.MASTER]: false,
|
||||
[Connection.roles.DATA]: false,
|
||||
[Connection.roles.INGEST]: false,
|
||||
[Connection.roles.ML]: false
|
||||
}, roles)
|
||||
})
|
||||
}
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an url string to a host object
|
||||
*
|
||||
* @param {string} url
|
||||
* @returns {object} host
|
||||
*/
|
||||
urlToHost (url) {
|
||||
return {
|
||||
url: new URL(url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = BaseConnectionPool
|
||||
64 lib/pool/CloudConnectionPool.js Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const BaseConnectionPool = require('./BaseConnectionPool')
|
||||
|
||||
class CloudConnectionPool extends BaseConnectionPool {
|
||||
constructor (opts = {}) {
|
||||
super(opts)
|
||||
this.cloudConnection = null
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the only cloud connection.
|
||||
*
|
||||
* @returns {object} connection
|
||||
*/
|
||||
getConnection () {
|
||||
return this.cloudConnection
|
||||
}
|
||||
|
||||
/**
|
||||
* Empties the connection pool.
|
||||
*
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
empty (callback) {
|
||||
super.empty(() => {
|
||||
this.cloudConnection = null
|
||||
callback()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the ConnectionPool with new connections.
|
||||
*
|
||||
* @param {array} array of connections
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
update (connections) {
|
||||
super.update(connections)
|
||||
this.cloudConnection = this.connections[0]
|
||||
return this
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = CloudConnectionPool
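A minimal usage sketch of the pool above; the node URL and id are made up, and the require paths assume the lib/pool layout introduced in this changeset. update() stores the first connection as the dedicated cloud connection, and getConnection() always returns it, with no filtering or selection.

const { URL } = require('url')
const CloudConnectionPool = require('./CloudConnectionPool')
const Connection = require('../Connection')

const pool = new CloudConnectionPool({ Connection })
pool.update([{ url: new URL('https://abc123.us-east-1.aws.found.io'), id: 'cloud-node' }])
pool.getConnection() // -> always the single cloud connection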
|
||||
247 lib/pool/ConnectionPool.js Normal file
@@ -0,0 +1,247 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const BaseConnectionPool = require('./BaseConnectionPool')
|
||||
const assert = require('assert')
|
||||
const debug = require('debug')('elasticsearch')
|
||||
const Connection = require('../Connection')
|
||||
const noop = () => {}
|
||||
|
||||
class ConnectionPool extends BaseConnectionPool {
|
||||
constructor (opts = {}) {
|
||||
super(opts)
|
||||
|
||||
this.dead = []
|
||||
// the resurrect timeout is 60s
|
||||
this.resurrectTimeout = 1000 * 60
|
||||
// number of consecutive failures after which
|
||||
// the timeout doesn't increase
|
||||
this.resurrectTimeoutCutoff = 5
|
||||
this.pingTimeout = opts.pingTimeout
|
||||
this._sniffEnabled = opts.sniffEnabled || false
|
||||
|
||||
const resurrectStrategy = opts.resurrectStrategy || 'ping'
|
||||
this.resurrectStrategy = ConnectionPool.resurrectStrategies[resurrectStrategy]
|
||||
assert(
|
||||
this.resurrectStrategy != null,
|
||||
`Invalid resurrection strategy: '${resurrectStrategy}'`
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks a connection as 'alive'.
|
||||
* If needed removes the connection from the dead list
|
||||
* and then resets the `deadCount`.
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markAlive (connection) {
|
||||
const { id } = connection
|
||||
debug(`Marking as 'alive' connection '${id}'`)
|
||||
const index = this.dead.indexOf(id)
|
||||
if (index > -1) this.dead.splice(index, 1)
|
||||
connection.status = Connection.statuses.ALIVE
|
||||
connection.deadCount = 0
|
||||
connection.resurrectTimeout = 0
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks a connection as 'dead'.
|
||||
* If needed adds the connection to the dead list
|
||||
* and then increments the `deadCount`.
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markDead (connection) {
|
||||
const { id } = connection
|
||||
debug(`Marking as 'dead' connection '${id}'`)
|
||||
if (this.dead.indexOf(id) === -1) {
|
||||
// It might happen that `markDead` is called just after
// a pool update, and in such a case we would add to the dead
// list a node that no longer exists. The following check verifies
// that the connection is still part of the pool before
// marking it as dead.
|
||||
for (var i = 0; i < this.size; i++) {
|
||||
if (this.connections[i].id === id) {
|
||||
this.dead.push(id)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
connection.status = Connection.statuses.DEAD
|
||||
connection.deadCount++
|
||||
// resurrectTimeout formula:
|
||||
// `resurrectTimeout * 2 ** min(deadCount - 1, resurrectTimeoutCutoff)`
|
||||
connection.resurrectTimeout = Date.now() + this.resurrectTimeout * Math.pow(
|
||||
2, Math.min(connection.deadCount - 1, this.resurrectTimeoutCutoff)
|
||||
)
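// Worked example: with the 60s base and a cutoff of 5, a connection that has
// failed 3 times in a row is retried after 60s * 2 ** 2 = 240s; from the 6th
// consecutive failure onwards the delay stays capped at 60s * 2 ** 5 = 32min.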
|
||||
|
||||
// sort the dead list in ascending order
|
||||
// based on the resurrectTimeout
|
||||
this.dead.sort((a, b) => {
|
||||
const conn1 = this.connections.find(c => c.id === a)
|
||||
const conn2 = this.connections.find(c => c.id === b)
|
||||
return conn1.resurrectTimeout - conn2.resurrectTimeout
|
||||
})
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* If enabled, tries to resurrect a connection with the given
|
||||
* resurrect strategy ('ping', 'optimistic', 'none').
|
||||
*
|
||||
* @param {object} { now, requestId }
|
||||
* @param {function} callback (isAlive, connection)
|
||||
*/
|
||||
resurrect (opts, callback = noop) {
|
||||
if (this.resurrectStrategy === 0 || this.dead.length === 0) {
|
||||
debug('Nothing to resurrect')
|
||||
callback(null, null)
|
||||
return
|
||||
}
|
||||
|
||||
// the dead list is sorted in ascending order based on the timeout
|
||||
// so the first element will always be the one with the smallest timeout
|
||||
const connection = this.connections.find(c => c.id === this.dead[0])
|
||||
if ((opts.now || Date.now()) < connection.resurrectTimeout) {
|
||||
debug('Nothing to resurrect')
|
||||
callback(null, null)
|
||||
return
|
||||
}
|
||||
|
||||
const { id } = connection
|
||||
|
||||
// ping strategy
|
||||
if (this.resurrectStrategy === 1) {
|
||||
connection.request({
|
||||
method: 'HEAD',
|
||||
path: '/',
|
||||
timeout: this.pingTimeout
|
||||
}, (err, response) => {
|
||||
var isAlive = true
|
||||
const statusCode = response !== null ? response.statusCode : 0
|
||||
if (err != null ||
|
||||
(statusCode === 502 || statusCode === 503 || statusCode === 504)) {
|
||||
debug(`Resurrect: connection '${id}' is still dead`)
|
||||
this.markDead(connection)
|
||||
isAlive = false
|
||||
} else {
|
||||
debug(`Resurrect: connection '${id}' is now alive`)
|
||||
this.markAlive(connection)
|
||||
}
|
||||
this.emit('resurrect', null, {
|
||||
strategy: 'ping',
|
||||
name: opts.name,
|
||||
request: { id: opts.requestId },
|
||||
isAlive,
|
||||
connection
|
||||
})
|
||||
callback(isAlive, connection)
|
||||
})
|
||||
// optimistic strategy
|
||||
} else {
|
||||
debug(`Resurrect: optimistic resurrection for connection '${id}'`)
|
||||
this.dead.splice(this.dead.indexOf(id), 1)
|
||||
connection.status = Connection.statuses.ALIVE
|
||||
this.emit('resurrect', null, {
|
||||
strategy: 'optimistic',
|
||||
name: opts.name,
|
||||
request: { id: opts.requestId },
|
||||
isAlive: true,
|
||||
connection
|
||||
})
|
||||
// eslint-disable-next-line standard/no-callback-literal
|
||||
callback(true, connection)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an alive connection if present,
|
||||
* otherwise returns a dead connection.
|
||||
* By default it filters out the `master`-only nodes.
* It uses the selector to choose which
* connection to return.
|
||||
*
|
||||
* @param {object} options (filter and selector)
|
||||
* @returns {object|null} connection
|
||||
*/
|
||||
getConnection (opts = {}) {
|
||||
const filter = opts.filter || (() => true)
|
||||
const selector = opts.selector || (c => c[0])
|
||||
|
||||
this.resurrect({
|
||||
now: opts.now,
|
||||
requestId: opts.requestId,
|
||||
name: opts.name
|
||||
})
|
||||
|
||||
const noAliveConnections = this.size === this.dead.length
|
||||
|
||||
// TODO: can we cache this?
|
||||
const connections = []
|
||||
for (var i = 0; i < this.size; i++) {
|
||||
const connection = this.connections[i]
|
||||
if (noAliveConnections || connection.status === Connection.statuses.ALIVE) {
|
||||
if (filter(connection) === true) {
|
||||
connections.push(connection)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (connections.length === 0) return null
|
||||
|
||||
return selector(connections)
|
||||
}
|
||||
|
||||
/**
|
||||
* Empties the connection pool.
|
||||
*
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
empty (callback) {
|
||||
super.empty(() => {
|
||||
this.dead = []
|
||||
callback()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the ConnectionPool with new connections.
|
||||
*
|
||||
* @param {array} array of connections
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
update (connections) {
|
||||
super.update(connections)
|
||||
this.dead = []
|
||||
return this
|
||||
}
|
||||
}
|
||||
|
||||
ConnectionPool.resurrectStrategies = {
|
||||
none: 0,
|
||||
ping: 1,
|
||||
optimistic: 2
|
||||
}
|
||||
|
||||
module.exports = ConnectionPool
|
||||
129 lib/ConnectionPool.d.ts → lib/pool/index.d.ts vendored
@@ -20,29 +20,54 @@
|
||||
/// <reference types="node" />
|
||||
|
||||
import { SecureContextOptions } from 'tls';
|
||||
import Connection, { AgentOptions } from './Connection';
|
||||
import { nodeFilterFn, nodeSelectorFn } from './Transport';
|
||||
import Connection, { AgentOptions } from '../Connection';
|
||||
import { nodeFilterFn, nodeSelectorFn } from '../Transport';
|
||||
|
||||
interface ConnectionPoolOptions {
|
||||
interface BaseConnectionPoolOptions {
|
||||
ssl?: SecureContextOptions;
|
||||
agent?: AgentOptions;
|
||||
auth?: BasicAuth | ApiKeyAuth;
|
||||
emit: (event: string | symbol, ...args: any[]) => boolean;
|
||||
pingTimeout?: number;
|
||||
Connection: typeof Connection;
|
||||
resurrectStrategy?: string;
|
||||
}
|
||||
|
||||
export interface getConnectionOptions {
|
||||
filter?: nodeFilterFn;
|
||||
selector?: nodeSelectorFn;
|
||||
interface ConnectionPoolOptions extends BaseConnectionPoolOptions {
|
||||
pingTimeout?: number;
|
||||
resurrectStrategy?: string;
|
||||
sniffEnabled?: boolean;
|
||||
}
|
||||
|
||||
export interface resurrectOptions {
|
||||
interface getConnectionOptions {
|
||||
filter?: nodeFilterFn;
|
||||
selector?: nodeSelectorFn;
|
||||
requestId?: string | number;
|
||||
name?: string;
|
||||
now?: number;
|
||||
}
|
||||
|
||||
interface ApiKeyAuth {
|
||||
apiKey:
|
||||
| string
|
||||
| {
|
||||
id: string;
|
||||
api_key: string;
|
||||
}
|
||||
}
|
||||
|
||||
interface BasicAuth {
|
||||
username: string;
|
||||
password: string;
|
||||
}
|
||||
|
||||
interface resurrectOptions {
|
||||
now?: number;
|
||||
requestId: string;
|
||||
name: string;
|
||||
}
|
||||
|
||||
export interface ResurrectEvent {
|
||||
interface ResurrectEvent {
|
||||
strategy: string;
|
||||
isAlive: boolean;
|
||||
connection: Connection;
|
||||
@@ -52,23 +77,14 @@ export interface ResurrectEvent {
|
||||
};
|
||||
}
|
||||
|
||||
export default class ConnectionPool {
|
||||
static resurrectStrategies: {
|
||||
none: number;
|
||||
ping: number;
|
||||
optimistic: number;
|
||||
};
|
||||
connections: any;
|
||||
dead: string[];
|
||||
|
||||
declare class BaseConnectionPool {
|
||||
connections: Connection[];
|
||||
_ssl: SecureContextOptions | null;
|
||||
_agent: AgentOptions | null;
|
||||
_sniffEnabled: boolean;
|
||||
resurrectTimeout: number;
|
||||
resurrectTimeoutCutoff: number;
|
||||
pingTimeout: number;
|
||||
auth: BasicAuth | ApiKeyAuth;
|
||||
Connection: typeof Connection;
|
||||
resurrectStrategy: number;
|
||||
constructor(opts?: ConnectionPoolOptions);
|
||||
constructor(opts?: BaseConnectionPoolOptions);
|
||||
/**
|
||||
* Marks a connection as 'alive'.
|
||||
* If needed removes the connection from the dead list
|
||||
@@ -76,7 +92,7 @@ export default class ConnectionPool {
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markAlive(connection: Connection): void;
|
||||
markAlive(connection: Connection): this;
|
||||
/**
|
||||
* Marks a connection as 'dead'.
|
||||
* If needed adds the connection to the dead list
|
||||
@@ -84,18 +100,10 @@ export default class ConnectionPool {
|
||||
*
|
||||
* @param {object} connection
|
||||
*/
|
||||
markDead(connection: Connection): void;
|
||||
/**
|
||||
* If enabled, tries to resurrect a connection with the given
|
||||
* resurrect strategy ('ping', 'optimistic', 'none').
|
||||
*
|
||||
* @param {object} { now, requestId, name }
|
||||
* @param {function} callback (isAlive, connection)
|
||||
*/
|
||||
resurrect(opts: resurrectOptions, callback?: (isAlive: boolean | null, connection: Connection | null) => void): void;
|
||||
markDead(connection: Connection): this;
|
||||
/**
|
||||
* Returns an alive connection if present,
|
||||
* otherwise returns null.
|
||||
* otherwise returns a dead connection.
|
||||
* By default it filters out the `master`-only nodes.
* It uses the selector to choose which
* connection to return.
|
||||
@@ -110,27 +118,27 @@ export default class ConnectionPool {
|
||||
* @param {object|string} host
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
addConnection(opts: any): Connection | void;
|
||||
addConnection(opts: any): Connection;
|
||||
/**
|
||||
* Removes a connection from the pool.
|
||||
*
|
||||
* @param {object} connection
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
removeConnection(connection: Connection): ConnectionPool;
|
||||
removeConnection(connection: Connection): this;
|
||||
/**
|
||||
* Empties the connection pool.
|
||||
*
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
empty(): ConnectionPool;
|
||||
empty(): this;
|
||||
/**
|
||||
* Update the ConnectionPool with new connections.
|
||||
*
|
||||
* @param {array} array of connections
|
||||
* @returns {ConnectionPool}
|
||||
*/
|
||||
update(connections: Connection[]): ConnectionPool;
|
||||
update(connections: any[]): this;
|
||||
/**
|
||||
* Transforms the nodes objects to a host object.
|
||||
*
|
||||
@@ -147,14 +155,57 @@ export default class ConnectionPool {
|
||||
urlToHost(url: string): any;
|
||||
}
|
||||
|
||||
declare class ConnectionPool extends BaseConnectionPool {
|
||||
static resurrectStrategies: {
|
||||
none: number;
|
||||
ping: number;
|
||||
optimistic: number;
|
||||
};
|
||||
dead: string[];
|
||||
_sniffEnabled: boolean;
|
||||
resurrectTimeout: number;
|
||||
resurrectTimeoutCutoff: number;
|
||||
pingTimeout: number;
|
||||
resurrectStrategy: number;
|
||||
constructor(opts?: ConnectionPoolOptions);
|
||||
|
||||
/**
|
||||
* If enabled, tries to resurrect a connection with the given
|
||||
* resurrect strategy ('ping', 'optimistic', 'none').
|
||||
*
|
||||
* @param {object} { now, requestId, name }
|
||||
* @param {function} callback (isAlive, connection)
|
||||
*/
|
||||
resurrect(opts: resurrectOptions, callback?: (isAlive: boolean | null, connection: Connection | null) => void): void;
|
||||
}
|
||||
|
||||
declare class CloudConnectionPool extends BaseConnectionPool {
|
||||
cloudConnection: Connection | null
|
||||
constructor(opts?: BaseConnectionPoolOptions);
|
||||
getConnection(): Connection;
|
||||
}
|
||||
|
||||
declare function defaultNodeFilter(node: Connection): boolean;
|
||||
declare function roundRobinSelector(): (connections: Connection[]) => Connection;
|
||||
declare function randomSelector(connections: Connection[]): Connection;
|
||||
|
||||
export declare const internals: {
|
||||
declare const internals: {
|
||||
defaultNodeFilter: typeof defaultNodeFilter;
|
||||
roundRobinSelector: typeof roundRobinSelector;
|
||||
randomSelector: typeof randomSelector;
|
||||
};
|
||||
|
||||
export {};
|
||||
export {
|
||||
// Interfaces
|
||||
ConnectionPoolOptions,
|
||||
getConnectionOptions,
|
||||
ApiKeyAuth,
|
||||
BasicAuth,
|
||||
internals,
|
||||
resurrectOptions,
|
||||
ResurrectEvent,
|
||||
// Classes
|
||||
BaseConnectionPool,
|
||||
ConnectionPool,
|
||||
CloudConnectionPool
|
||||
};
|
||||
30 lib/pool/index.js Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
'use strict'
|
||||
|
||||
const BaseConnectionPool = require('./BaseConnectionPool')
|
||||
const ConnectionPool = require('./ConnectionPool')
|
||||
const CloudConnectionPool = require('./CloudConnectionPool')
|
||||
|
||||
module.exports = {
|
||||
BaseConnectionPool,
|
||||
ConnectionPool,
|
||||
CloudConnectionPool
|
||||
}
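A short sketch of how the new entry point is meant to be consumed (the path assumes the repo layout above):

const { BaseConnectionPool, ConnectionPool, CloudConnectionPool } = require('./lib/pool')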
|
||||
22 package.json
@@ -4,7 +4,7 @@
|
||||
"main": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"homepage": "http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html",
|
||||
"version": "6.8.0",
|
||||
"version": "6.8.8",
|
||||
"keywords": [
|
||||
"elasticsearch",
|
||||
"elastic",
|
||||
@@ -19,13 +19,15 @@
|
||||
"test": "npm run lint && npm run test:unit && npm run test:behavior && npm run test:types",
|
||||
"test:unit": "tap test/unit/*.test.js -t 300 --no-coverage",
|
||||
"test:behavior": "tap test/behavior/*.test.js -t 300 --no-coverage",
|
||||
"test:integration": "tap test/integration/index.js -T --harmony --no-esm --no-coverage",
|
||||
"test:integration": "tap test/integration/index.js -T --no-coverage",
|
||||
"test:types": "tsc --project ./test/types/tsconfig.json",
|
||||
"test:coverage": "nyc npm run test:unit && nyc report --reporter=text-lcov > coverage.lcov && codecov",
|
||||
"test:coverage": "nyc tap test/unit/*.test.js test/behavior/*.test.js -t 300 && nyc report --reporter=text-lcov > coverage.lcov",
|
||||
"lint": "standard",
|
||||
"lint:fix": "standard --fix",
|
||||
"ci": "npm run license-checker && npm test && npm run test:integration && npm run test:coverage",
|
||||
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'"
|
||||
"license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause'",
|
||||
"elasticsearch": "./scripts/es-docker.sh",
|
||||
"elasticsearch:xpack": "./scripts/es-docker-platinum.sh"
|
||||
},
|
||||
"author": {
|
||||
"name": "Tomas Della Vedova",
|
||||
@@ -36,11 +38,10 @@
|
||||
"company": "Elasticsearch BV"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^11.13.7",
|
||||
"codecov": "^3.3.0",
|
||||
"@types/node": "^12.6.2",
|
||||
"convert-hrtime": "^3.0.0",
|
||||
"dedent": "^0.7.0",
|
||||
"deepmerge": "^3.2.0",
|
||||
"deepmerge": "^4.0.0",
|
||||
"dezalgo": "^1.0.3",
|
||||
"js-yaml": "^3.13.1",
|
||||
"license-checker": "^25.0.1",
|
||||
@@ -53,9 +54,9 @@
|
||||
"simple-git": "^1.110.0",
|
||||
"simple-statistics": "^7.0.2",
|
||||
"split2": "^3.1.1",
|
||||
"standard": "^12.0.1",
|
||||
"standard": "^13.0.2",
|
||||
"stoppable": "^1.1.0",
|
||||
"tap": "^13.0.1",
|
||||
"tap": "^14.4.1",
|
||||
"typescript": "^3.4.5",
|
||||
"workq": "^2.1.0"
|
||||
},
|
||||
@@ -65,7 +66,8 @@
|
||||
"into-stream": "^5.1.0",
|
||||
"ms": "^2.1.1",
|
||||
"once": "^1.4.0",
|
||||
"pump": "^3.0.0"
|
||||
"pump": "^3.0.0",
|
||||
"secure-json-parse": "^2.1.0"
|
||||
},
|
||||
"license": "Apache-2.0",
|
||||
"repository": {
|
||||
|
||||
@@ -5,27 +5,67 @@ testnodecrt="/.ci/certs/testnode.crt"
|
||||
testnodekey="/.ci/certs/testnode.key"
|
||||
cacrt="/.ci/certs/ca.crt"
|
||||
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
|
||||
-e "ELASTIC_PASSWORD=changeme" \
|
||||
-e "xpack.security.enabled=true" \
|
||||
-e "xpack.license.self_generated.type=trial" \
|
||||
-e "xpack.security.http.ssl.enabled=true" \
|
||||
-e "xpack.security.http.ssl.verification_mode=certificate" \
|
||||
-e "xpack.security.http.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-e "xpack.security.transport.ssl.enabled=true" \
|
||||
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
|
||||
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
|
||||
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
|
||||
-p 9200:9200 \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.7.1
|
||||
# pass `--clean` to remove the old snapshot
|
||||
if [ "$1" == "--clean" ]; then
|
||||
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
|
||||
fi
|
||||
|
||||
# Create the 'elastic' network if it doesn't exist
docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
|
||||
|
||||
if [ "$1" == "--detach" ]; then
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
|
||||
-e "ELASTIC_PASSWORD=changeme" \
|
||||
-e "xpack.security.enabled=true" \
|
||||
-e "xpack.license.self_generated.type=trial" \
|
||||
-e "xpack.security.http.ssl.enabled=true" \
|
||||
-e "xpack.security.http.ssl.verification_mode=certificate" \
|
||||
-e "xpack.security.http.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-e "xpack.security.transport.ssl.enabled=true" \
|
||||
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
|
||||
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
|
||||
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
|
||||
-p 9200:9200 \
|
||||
--detach \
|
||||
--network=elastic \
|
||||
--name=elasticsearch \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
|
||||
else
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-e "ES_JAVA_OPTS=-Xms1g -Xmx1g" \
|
||||
-e "ELASTIC_PASSWORD=changeme" \
|
||||
-e "xpack.security.enabled=true" \
|
||||
-e "xpack.license.self_generated.type=trial" \
|
||||
-e "xpack.security.http.ssl.enabled=true" \
|
||||
-e "xpack.security.http.ssl.verification_mode=certificate" \
|
||||
-e "xpack.security.http.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.http.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.http.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-e "xpack.security.transport.ssl.enabled=true" \
|
||||
-e "xpack.security.transport.ssl.key=certs/testnode.key" \
|
||||
-e "xpack.security.transport.ssl.certificate=certs/testnode.crt" \
|
||||
-e "xpack.security.transport.ssl.certificate_authorities=certs/ca.crt" \
|
||||
-v "$repo$testnodecrt:/usr/share/elasticsearch/config/certs/testnode.crt" \
|
||||
-v "$repo$testnodekey:/usr/share/elasticsearch/config/certs/testnode.key" \
|
||||
-v "$repo$cacrt:/usr/share/elasticsearch/config/certs/ca.crt" \
|
||||
-p 9200:9200 \
|
||||
--network=elastic \
|
||||
--name=elasticsearch \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
|
||||
fi
|
||||
|
||||
@@ -1,12 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-p 9200:9200 \
|
||||
--network=elastic \
|
||||
--name=elasticsearch \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.7.1
|
||||
# Images are cached locally; it may be necessary
# to delete an old image and download the latest
# snapshot again.

# pass `--clean` to remove the old snapshot
|
||||
if [ "$1" == "--clean" ]; then
|
||||
docker rmi $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '8.0.0-SNAPSHOT')
|
||||
fi
|
||||
|
||||
# Create the 'elastic' network if doesn't exist
|
||||
exec docker network ls | grep elastic > /dev/null || docker network create elastic > /dev/null
|
||||
|
||||
if [ "$1" == "--detach" ]; then
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-p 9200:9200 \
|
||||
--detach \
|
||||
--network=elastic \
|
||||
--name=elasticsearch \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
|
||||
else
|
||||
exec docker run \
|
||||
--rm \
|
||||
-e "node.attr.testattr=test" \
|
||||
-e "path.repo=/tmp" \
|
||||
-e "repositories.url.allowed_urls=http://snapshot.*" \
|
||||
-e "discovery.type=single-node" \
|
||||
-p 9200:9200 \
|
||||
--network=elastic \
|
||||
--name=elasticsearch \
|
||||
docker.elastic.co/elasticsearch/elasticsearch:6.8.2
|
||||
fi
|
||||
|
||||
@@ -20,7 +20,7 @@
'use strict'

const { join } = require('path')
const { readdirSync, writeFileSync } = require('fs')
const { readdirSync, readFileSync, writeFileSync } = require('fs')
const minimist = require('minimist')
const semver = require('semver')
const ora = require('ora')
@@ -35,19 +35,19 @@ const {
} = require('./utils')

start(minimist(process.argv.slice(2), {
string: ['tag']
string: ['tag', 'branch']
}))

function start (opts) {
const log = ora('Loading Elasticsearch Repository').start()
if (semver.valid(opts.tag) === null) {
if (opts.branch == null && semver.valid(opts.tag) === null) {
log.fail(`Missing or invalid tag: ${opts.tag}`)
return
}
const packageFolder = join(__dirname, '..', 'api')
const apiOutputFolder = join(packageFolder, 'api')
const mainOutputFile = join(packageFolder, 'index.js')
const typesOutputFile = join(packageFolder, 'generated.d.ts')
const typeDefFile = join(__dirname, '..', 'index.d.ts')
const docOutputFile = join(__dirname, '..', 'docs', 'reference.asciidoc')
const requestParamsOutputFile = join(packageFolder, 'requestParams.d.ts')
const allSpec = []
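The new `--branch` flag rides on minimist's `string` option, which keeps both values as strings even when they look numeric. A small standalone sketch of the parsing and of the guard above (the argv values are made up):

const minimist = require('minimist')
const semver = require('semver')

const opts = minimist(['--branch', '7.x'], { string: ['tag', 'branch'] })
console.log(opts.branch) // '7.x'
console.log(opts.tag) // undefined
// with a branch given, the guard above no longer bails out:
console.log(opts.branch == null && semver.valid(opts.tag) === null) // false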
@@ -55,7 +55,7 @@ function start (opts) {
log.text = 'Cleaning API folder...'
rimraf.sync(join(apiOutputFolder, '*.js'))

cloneAndCheckout({ log, tag: opts.tag }, (err, { apiFolder, xPackFolder }) => {
cloneAndCheckout({ log, tag: opts.tag, branch: opts.branch }, (err, { apiFolder, xPackFolder }) => {
if (err) {
log.fail(err.message)
return
@@ -79,9 +79,14 @@ function start (opts) {
factory,
{ encoding: 'utf8' }
)

const oldTypeDefString = readFileSync(typeDefFile, 'utf8')
const start = oldTypeDefString.indexOf('/* GENERATED */')
const end = oldTypeDefString.indexOf('/* /GENERATED */')
const newTypeDefString = oldTypeDefString.slice(0, start + 15) + '\n' + types + '\n ' + oldTypeDefString.slice(end)
writeFileSync(
typesOutputFile,
types,
typeDefFile,
newTypeDefString,
{ encoding: 'utf8' }
)
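The splice above relies on two markers inside index.d.ts; `start + 15` works because '/* GENERATED */' is exactly 15 characters long, so the opening marker is kept and only the text between the markers is replaced. A standalone sketch of the same splice (the sample strings are made up):

const oldTypeDefString = 'before /* GENERATED */ stale types /* /GENERATED */ after'
const types = 'interface Hello { world: string }'
const start = oldTypeDefString.indexOf('/* GENERATED */')
const end = oldTypeDefString.indexOf('/* /GENERATED */')
const updated = oldTypeDefString.slice(0, start + 15) + '\n' + types + '\n ' + oldTypeDefString.slice(end)
console.log(updated)
// before /* GENERATED */
// interface Hello { world: string }
//  /* /GENERATED */ after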

@@ -97,7 +102,6 @@ function start (opts) {
)

log.succeed('Done!')
console.log('Remember to copy the generated types into the index.d.ts file')
})
})


@@ -29,7 +29,7 @@ const apiFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'r
const xPackFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'api')

function cloneAndCheckout (opts, callback) {
const { log, tag } = opts
const { log, tag, branch } = opts
withTag(tag, callback)

/**
@@ -57,13 +57,19 @@ function cloneAndCheckout (opts, callback) {

if (fresh) {
clone(checkout)
} else if (opts.branch) {
checkout(true)
} else {
checkout()
}

function checkout () {
log.text = `Checking out tag '${tag}'`
git.checkout(tag, err => {
function checkout (alsoPull = false) {
if (branch) {
log.text = `Checking out branch '${branch}'`
} else {
log.text = `Checking out tag '${tag}'`
}
git.checkout(branch || tag, err => {
if (err) {
if (retry++ > 0) {
callback(new Error(`Cannot checkout tag '${tag}'`), { apiFolder, xPackFolder })
@@ -71,6 +77,9 @@ function cloneAndCheckout (opts, callback) {
}
return pull(checkout)
}
if (alsoPull) {
return pull(checkout)
}
callback(null, { apiFolder, xPackFolder })
})
}

@@ -29,7 +29,9 @@ function generateDocs (common, spec) {
////////

This documentation is generated by running:
node scripts/run.js --tag v7.0.0-beta
node scripts/run.js --tag tagName
or
node scripts/run.js --branch branchName

////////\n\n`
doc += commonParameters(common)
@@ -109,11 +111,23 @@ function generateApiDoc (spec) {
})
}

const codeParameters = params
.reduce((acc, val) => {
var code = `${val.name}: ${val.type},`
acc += acc === ''
? code
: '\n ' + code

return acc
}, '')
// remove last comma
.slice(0, -1)

var doc = dedent`
=== ${camelify(name)}
[source,js]
[source,ts]
----
client.${camelify(name)}([params] [, options] [, callback])
client.${camelify(name)}(${codeParameters.length > 0 ? `{\n ${codeParameters}\n}` : ''})
----\n`
if (documentationUrl) {
doc += `link:${documentationUrl}[Reference]\n`
@@ -128,7 +142,7 @@ function generateApiDoc (spec) {
: '`' + val.name + '`'
acc += dedent`
|${name}
|${'`' + val.type + '`'} - ${val.description}`
|${'`' + val.type.replace(/\|/g, '\\|') + '`'} - ${val.description}`
if (val.default) {
acc += ` +\n_Default:_ ${'`' + val.default + '`'}`
}
@@ -180,13 +194,13 @@ function fixLink (name, str) {
function getType (type, options) {
switch (type) {
case 'list':
return 'string, string[]'
return 'string | string[]'
case 'date':
case 'time':
case 'timeout':
return 'string'
case 'enum':
return options.map(k => `'${k}'`).join(', ')
return options.map(k => `'${k}'`).join(' | ')
case 'int':
case 'double':
case 'long':
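The two changed branches now emit valid TypeScript union syntax instead of comma-separated alternatives. A quick isolated check (this mirrors the diff above, not the full getType):

function getType (type, options) {
  switch (type) {
    case 'list':
      return 'string | string[]'
    case 'enum':
      return options.map(k => `'${k}'`).join(' | ')
    default:
      return 'any'
  }
}

console.log(getType('list')) // string | string[]
console.log(getType('enum', ['open', 'closed'])) // 'open' | 'closed'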

@@ -88,7 +88,13 @@ function genFactory (folder) {

// serialize the type object
const typesStr = Object.keys(types)
.map(key => `${key}: ${JSON.stringify(types[key], null, 2)}`)
.map(key => {
const line = ` ${key}: ${JSON.stringify(types[key], null, 4)}`
if (line.slice(-1) === '}') {
return line.slice(0, -1) + ' }'
}
return line
})
.join('\n')
// remove useless quotes and commas
.replace(/"/g, '')

@@ -64,8 +64,7 @@ test('Should execute the recurrect API with the ping strategy', t => {
})

q.add((q, done) => {
cluster.kill('node0')
setTimeout(done, 100)
cluster.kill('node0', done)
})

q.add((q, done) => {
@@ -98,10 +97,9 @@ test('Resurrect a node and handle 502/3/4 status code', t => {

var count = 0
function handler (req, res) {
res.statusCode = count < 2 ? 502 : 200
res.statusCode = count++ < 2 ? 502 : 200
res.setHeader('content-type', 'application/json')
res.end(JSON.stringify({ hello: 'world' }))
count++
}

buildCluster({ handler, numberOfNodes: 2 }, ({ nodes, shutdown }) => {
@@ -123,7 +121,7 @@ test('Resurrect a node and handle 502/3/4 status code', t => {
t.strictEqual(meta.connection.id, 'node0')
t.strictEqual(meta.name, 'elasticsearch-js')
t.deepEqual(meta.request, { id: idCount++ })
if (count < 3) {
if (count < 4) {
t.false(meta.isAlive)
} else {
t.true(meta.isAlive)
@@ -189,8 +187,7 @@ test('Should execute the recurrect API with the optimistic strategy', t => {
})

q.add((q, done) => {
cluster.kill('node0')
setTimeout(done, 100)
cluster.kill('node0', done)
})

q.add((q, done) => {

@@ -21,6 +21,8 @@

const { test } = require('tap')
const { URL } = require('url')
const lolex = require('lolex')
const workq = require('workq')
const { buildCluster } = require('../utils')
const { Client, Connection, Transport, events, errors } = require('../../index')

@@ -41,7 +43,7 @@ test('Should update the connection pool', t => {
const client = new Client({
node: nodes[Object.keys(nodes)[0]].url
})
t.strictEqual(client.connectionPool.connections.size, 1)
t.strictEqual(client.connectionPool.size, 1)

client.on(events.SNIFF, (err, request) => {
t.error(err)
@@ -87,7 +89,7 @@ test('Should update the connection pool', t => {
}
}

t.strictEqual(client.connectionPool.connections.size, 4)
t.strictEqual(client.connectionPool.size, 4)
})
t.teardown(shutdown)
})
@@ -100,7 +102,7 @@ test('Should handle hostnames in publish_address', t => {
const client = new Client({
node: nodes[Object.keys(nodes)[0]].url
})
t.strictEqual(client.connectionPool.connections.size, 1)
t.strictEqual(client.connectionPool.size, 1)

client.on(events.SNIFF, (err, request) => {
t.error(err)
@@ -120,14 +122,16 @@ test('Should handle hostnames in publish_address', t => {
t.strictEqual(hosts[i].url.hostname, 'localhost')
}

t.strictEqual(client.connectionPool.connections.size, 4)
t.strictEqual(client.connectionPool.size, 4)
})
t.teardown(shutdown)
})
})

test('Sniff interval', t => {
t.plan(10)
t.plan(11)
const clock = lolex.install({ toFake: ['Date'] })
const q = workq()

buildCluster(({ nodes, shutdown, kill }) => {
const client = new Client({
@@ -140,26 +144,54 @@ test('Sniff interval', t => {
t.error(err)
const { hosts, reason } = request.meta.sniff
t.strictEqual(
client.connectionPool.connections.size,
client.connectionPool.size,
hosts.length
)
t.strictEqual(reason, Transport.sniffReasons.SNIFF_INTERVAL)
})

t.strictEqual(client.connectionPool.connections.size, 1)
setTimeout(() => client.info(t.error), 60)
t.strictEqual(client.connectionPool.size, 1)

setTimeout(() => {
// let's kill a node
kill('node1')
client.info(t.error)
}, 150)
q.add((q, done) => {
clock.tick(51)
client.info(err => {
t.error(err)
waitSniffEnd(() => {
t.strictEqual(client.connectionPool.size, 4)
done()
})
})
})

setTimeout(() => {
t.strictEqual(client.connectionPool.connections.size, 3)
}, 200)
q.add((q, done) => {
kill('node1', done)
})

q.add((q, done) => {
clock.tick(51)
client.info(err => {
t.error(err)
waitSniffEnd(() => {
t.strictEqual(client.connectionPool.size, 3)
done()
})
})
})

t.teardown(shutdown)

// it can happen that the sniff operation resolves
// after the API call that triggered it, so to
// be sure that we are checking the connectionPool size
// at the right moment, we verify that the transport
// is no longer sniffing
function waitSniffEnd (callback) {
if (client.transport._isSniffing) {
setTimeout(waitSniffEnd, 500, callback)
} else {
callback()
}
}
})
})
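waitSniffEnd above is a small poll-until helper: re-check a condition on a timer and only fire the callback once it holds. The same pattern generalized (names are illustrative, not from the diff):

function waitFor (isDone, callback, interval = 500) {
  if (isDone()) {
    callback()
  } else {
    setTimeout(waitFor, interval, isDone, callback, interval)
  }
}

// e.g. waitFor(() => client.transport._isSniffing === false, done)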

@@ -176,13 +208,13 @@ test('Sniff on start', t => {
t.error(err)
const { hosts, reason } = request.meta.sniff
t.strictEqual(
client.connectionPool.connections.size,
client.connectionPool.size,
hosts.length
)
t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_START)
})

t.strictEqual(client.connectionPool.connections.size, 1)
t.strictEqual(client.connectionPool.size, 1)
t.teardown(shutdown)
})
})
@@ -205,11 +237,11 @@ test('Should not close living connections', t => {
Connection: MyConnection
})

t.strictEqual(client.connectionPool.connections.size, 1)
t.strictEqual(client.connectionPool.size, 1)
client.transport.sniff((err, hosts) => {
t.error(err)
t.strictEqual(
client.connectionPool.connections.size,
client.connectionPool.size,
hosts.length
)
})
@@ -243,13 +275,13 @@ test('Sniff on connection fault', t => {
Connection: MyConnection
})

t.strictEqual(client.connectionPool.connections.size, 2)
t.strictEqual(client.connectionPool.size, 2)
// this event will be triggered by the connection fault
client.on(events.SNIFF, (err, request) => {
t.error(err)
const { hosts, reason } = request.meta.sniff
t.strictEqual(
client.connectionPool.connections.size,
client.connectionPool.size,
hosts.length
)
t.strictEqual(reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT)

@@ -56,11 +56,11 @@ const esDefaultUsers = [
'remote_monitoring_user'
]

function runInParallel (client, operation, options) {
function runInParallel (client, operation, options, clientOptions) {
if (options.length === 0) return Promise.resolve()
const operations = options.map(opts => {
const api = delve(client, operation).bind(client)
return api(opts)
return api(opts, clientOptions)
})

return Promise.all(operations)
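The extra clientOptions argument lets every parallel call share the same per-request options. A usage sketch against a stub client (the stub stands in for the real Elasticsearch client; runInParallel and delve are the helpers shown above):

const stubClient = {
  security: {
    putUser (params, options) {
      console.log(params.username, options.headers.authorization)
      return Promise.resolve()
    }
  }
}

runInParallel(
  stubClient,
  'security.putUser',
  [{ username: 'foo' }, { username: 'bar' }],
  { headers: { authorization: 'ApiKey Zm9vOmJhcg==' } }
).then(() => console.log('all done'))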
@@ -80,4 +80,10 @@ function delve (obj, key, def, p) {
return (obj === undefined || p < key.length) ? def : obj
}

module.exports = { runInParallel, esDefaultRoles, esDefaultUsers, delve }
function to (promise) {
return promise.then(data => [null, data], err => [err, undefined])
}

const sleep = ms => new Promise(resolve => setTimeout(resolve, ms))

module.exports = { runInParallel, esDefaultRoles, esDefaultUsers, delve, to, sleep }
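`to` wraps a promise into an `[err, data]` tuple so await sites can skip try/catch, and `sleep` is just a promisified setTimeout. A runnable sketch of both helpers in use:

function to (promise) {
  return promise.then(data => [null, data], err => [err, undefined])
}
const sleep = ms => new Promise(resolve => setTimeout(resolve, ms))

async function demo () {
  const [err] = await to(Promise.reject(new Error('boom')))
  console.log(err.message) // 'boom', no try/catch needed
  await sleep(100) // pause for 100ms
  const [, value] = await to(Promise.resolve(42))
  console.log(value) // 42
}

demo()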

@@ -19,29 +19,32 @@

'use strict'

const assert = require('assert')
const { readFileSync, accessSync, mkdirSync, readdirSync, statSync } = require('fs')
const { join, sep } = require('path')
const yaml = require('js-yaml')
const Git = require('simple-git')
const ora = require('ora')
const tap = require('tap')
const { Client } = require('../../index')
const TestRunner = require('./test-runner')
const { sleep } = require('./helper')

const esRepo = 'https://github.com/elastic/elasticsearch.git'
const esFolder = join(__dirname, '..', '..', 'elasticsearch')
const yamlFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'test')
const xPackYamlFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'test')
const customSkips = [

const ossSkips = {
// TODO: remove this once 'arbitrary_key' is implemented
// https://github.com/elastic/elasticsearch/pull/41492
'indices.split/30_copy_settings.yml': ['*'],
// skipping because we are booting ES with `discovery.type=single-node`
// and this test will fail because of this configuration
'nodes.stats/30_discovery.yml',
'nodes.stats/30_discovery.yml': ['*'],
// the expected error is returning a 503,
// which triggers a retry and the node to be marked as dead
'search.aggregation/240_max_buckets.yml'
]
const platinumBlackList = {
'search.aggregation/240_max_buckets.yml': ['*']
}
const xPackBlackList = {
// file path: test name
'cat.aliases/10_basic.yml': ['Empty cluster'],
'deprecation/10_basic.yml': ['Test Deprecations'],
@@ -74,279 +77,276 @@ const platinumBlackList = {
'xpack/15_basic.yml': ['*']
}

function Runner (opts) {
if (!(this instanceof Runner)) {
return new Runner(opts)
}
opts = opts || {}

assert(opts.node, 'Missing base node')
this.bailout = opts.bailout
const options = { node: opts.node }
if (opts.isPlatinum) {
options.ssl = {
// NOTE: this path works only if we run
// the suite with npm scripts
ca: readFileSync('.ci/certs/ca.crt', 'utf8'),
rejectUnauthorized: false
}
}
this.client = new Client(options)
this.log = ora('Loading yaml suite').start()
}

Runner.prototype.waitCluster = function (callback, times = 0) {
this.log.text = 'Waiting for ElasticSearch'
this.client.cluster.health(
{ waitForStatus: 'green', timeout: '50s' },
(err, res) => {
if (err && ++times < 10) {
setTimeout(() => {
this.waitCluster(callback, times)
}, 5000)
} else {
callback(err)
class Runner {
constructor (opts = {}) {
const options = { node: opts.node }
if (opts.isXPack) {
options.ssl = {
ca: readFileSync(join(__dirname, '..', '..', '.ci', 'certs', 'ca.crt'), 'utf8'),
rejectUnauthorized: false
}
}
)
}
this.client = new Client(options)
console.log('Loading yaml suite')
}

/**
* Runs the test suite
*/
Runner.prototype.start = function (opts) {
const parse = this.parse.bind(this)
const client = this.client

// client.on('response', (err, meta) => {
// console.log('Request', meta.request)
// if (err) {
// console.log('Error', err)
// } else {
// console.log('Response', JSON.stringify(meta.response, null, 2))
// }
// console.log()
// })

this.waitCluster(err => {
if (err) {
this.log.fail(err.message)
async waitCluster (client, times = 0) {
try {
await client.cluster.health({ waitForStatus: 'green', timeout: '50s' })
} catch (err) {
if (++times < 10) {
await sleep(5000)
return this.waitCluster(client, times)
}
console.error(err)
process.exit(1)
}
// Get the build hash of Elasticsearch
client.info((err, { body }) => {
if (err) {
this.log.fail(err.message)
process.exit(1)
}
const { number: version, build_hash: sha } = body.version
}

// Set the repository to the given sha and run the test suite
this.withSHA(sha, () => {
this.log.succeed(`Testing ${opts.isPlatinum ? 'platinum' : 'oss'} api...`)
runTest.call(this, version)
})
})
})
async start ({ isXPack }) {
const { client } = this
const parse = this.parse.bind(this)

function runTest (version) {
const files = []
console.log('Waiting for Elasticsearch')
await this.waitCluster(client)

const { body } = await client.info()
const { number: version, build_hash: sha } = body.version

console.log(`Checking out sha ${sha}...`)
await this.withSHA(sha)

console.log(`Testing ${isXPack ? 'XPack' : 'oss'} api...`)

const folders = []
.concat(getAllFiles(yamlFolder))
.concat(opts.isPlatinum ? getAllFiles(xPackYamlFolder) : [])
.concat(isXPack ? getAllFiles(xPackYamlFolder) : [])
.filter(t => !/(README|TODO)/g.test(t))
// we cluster the array based on the folder names,
// to provide a better test log output
.reduce((arr, file) => {
const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/'))
var inserted = false
for (var i = 0; i < arr.length; i++) {
if (arr[i][0].includes(path)) {
inserted = true
arr[i].push(file)
break
}
}
if (!inserted) arr.push([file])
return arr
}, [])

files.forEach(runTestFile.bind(this))
function runTestFile (file) {
// if (!file.endsWith('watcher/execute_watch/70_invalid.yml')) return
for (var i = 0; i < customSkips.length; i++) {
if (file.endsWith(customSkips[i])) return
}
// create a subtest for the specific folder
tap.test(file.slice(file.indexOf(`${sep}elasticsearch${sep}`)), { jobs: 1 }, tap1 => {
// read the yaml file
const data = readFileSync(file, 'utf8')
// get the test yaml (as object); some files have multiple yaml documents inside,
// every document is separated by '---', so we split on the separator
// and then we remove the empty strings; finally we parse them
const tests = data
.split('\n---\n')
.map(s => s.trim())
.filter(Boolean)
.map(parse)
for (const folder of folders) {
// pretty name
const apiName = folder[0].slice(
folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19,
folder[0].lastIndexOf(sep)
)

tap.test(`Testing ${apiName}`, { bail: true, timeout: 0 }, t => {
for (const file of folder) {
const data = readFileSync(file, 'utf8')
// get the test yaml (as object); some files have multiple yaml documents inside,
// every document is separated by '---', so we split on the separator
// and then we remove the empty strings; finally we parse them
const tests = data
.split('\n---\n')
.map(s => s.trim())
.filter(Boolean)
.map(parse)

t.test(
file.slice(file.lastIndexOf(apiName)),
testFile(file, tests)
)
}
t.end()
})
}
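Each yaml file can hold several documents separated by '---'; the split/trim/filter/parse chain above turns one file into an array of parsed documents. A standalone sketch of the same chain using js-yaml, as the runner does (the sample data is made up):

const yaml = require('js-yaml')

const data = 'setup:\n  foo: 1\n---\nteardown:\n  bar: 2\n---\n'
const tests = data
  .split('\n---\n')
  .map(s => s.trim())
  .filter(Boolean)
  .map(s => yaml.safeLoad(s))
console.log(tests) // [ { setup: { foo: 1 } }, { teardown: { bar: 2 } } ]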

function testFile (file, tests) {
return t => {
// get setup and teardown if present
var setupTest = null
var teardownTest = null
tests.forEach(test => {
for (const test of tests) {
if (test.setup) setupTest = test.setup
if (test.teardown) teardownTest = test.teardown
})
}

// run the tests
tests.forEach(test => {
const name = Object.keys(test)[0]
if (name === 'setup' || name === 'teardown') return
// should skip the test inside `platinumBlackList`
// if we are testing the platinum apis
if (opts.isPlatinum) {
const list = Object.keys(platinumBlackList)
for (i = 0; i < list.length; i++) {
const platTest = platinumBlackList[list[i]]
for (var j = 0; j < platTest.length; j++) {
if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) {
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
tap.skip(`Skipping test ${testName} because is blacklisted in the platinum test`)
return
}
}
}
}
if (shouldSkip(t, isXPack, file, name)) return

// create a subtest for the specific folder + test file + test name
tap1.test(name, { jobs: 1, bail: this.bailout }, tap2 => {
const testRunner = TestRunner({
t.test(name, async t => {
const testRunner = new TestRunner({
client,
version,
tap: tap2,
isPlatinum: file.includes('x-pack')
tap: t,
isXPack: file.includes('x-pack')
})
testRunner.run(setupTest, test[name], teardownTest, () => tap2.end())
await testRunner.run(setupTest, test[name], teardownTest)
})
})

tap1.end()
})
t.end()
}
}
}
}

/**
* Parses a given yaml document
* @param {string} yaml document
* @returns {object}
*/
Runner.prototype.parse = function (data) {
try {
var doc = yaml.safeLoad(data)
} catch (err) {
this.log.fail(err.message)
return
}
return doc
}

/**
* Returns the filtered content of a given folder
* @param {string} folder
* @returns {Array} The content of the given folder
*/
Runner.prototype.getTest = function (folder) {
const tests = readdirSync(folder)
return tests.filter(t => !/(README|TODO)/g.test(t))
}

/**
* Sets the elasticsearch repository to the given sha.
* If the repository is not present in `esFolder` it will
* clone the repository and then checkout the sha.
* If the repository is already present but it cannot checkout to
* the given sha, it will perform a pull and then try again.
* @param {string} sha
* @param {function} callback
*/
Runner.prototype.withSHA = function (sha, callback) {
var fresh = false
var retry = 0
var log = this.log

if (!this.pathExist(esFolder)) {
if (!this.createFolder(esFolder)) {
log.fail('Failed folder creation')
parse (data) {
try {
var doc = yaml.safeLoad(data)
} catch (err) {
console.error(err)
return
}
fresh = true
return doc
}

const git = Git(esFolder)

if (fresh) {
clone(checkout)
} else {
checkout()
getTest (folder) {
const tests = readdirSync(folder)
return tests.filter(t => !/(README|TODO)/g.test(t))
}

function checkout () {
log.text = `Checking out sha '${sha}'`
git.checkout(sha, err => {
if (err) {
if (retry++ > 0) {
log.fail(`Cannot checkout sha '${sha}'`)
return
/**
* Sets the elasticsearch repository to the given sha.
* If the repository is not present in `esFolder` it will
* clone the repository and then checkout the sha.
* If the repository is already present but it cannot checkout to
* the given sha, it will perform a pull and then try again.
* @param {string} sha
* @param {function} callback
*/
withSHA (sha) {
return new Promise((resolve, reject) => {
_withSHA.call(this, err => err ? reject(err) : resolve())
})

function _withSHA (callback) {
var fresh = false
var retry = 0

if (!this.pathExist(esFolder)) {
if (!this.createFolder(esFolder)) {
return callback(new Error('Failed folder creation'))
}
return pull(checkout)
fresh = true
}
callback()
})
}

function pull (cb) {
log.text = 'Pulling elasticsearch repository...'
git.pull(err => {
if (err) {
log.fail(err.message)
return
const git = Git(esFolder)

if (fresh) {
clone(checkout)
} else {
checkout()
}
cb()
})
}

function clone (cb) {
log.text = 'Cloning elasticsearch repository...'
git.clone(esRepo, esFolder, err => {
if (err) {
log.fail(err.message)
return
function checkout () {
console.log(`Checking out sha '${sha}'`)
git.checkout(sha, err => {
if (err) {
if (retry++ > 0) {
return callback(err)
}
return pull(checkout)
}
callback()
})
}
cb()
})
}
}

/**
* Checks if the given path exists
* @param {string} path
* @returns {boolean} true if exists, false if not
*/
Runner.prototype.pathExist = function (path) {
try {
accessSync(path)
return true
} catch (err) {
return false
}
}
function pull (cb) {
console.log('Pulling elasticsearch repository...')
git.pull(err => {
if (err) {
return callback(err)
}
cb()
})
}

/**
* Creates the given folder
* @param {string} name
* @returns {boolean} true on success, false on failure
*/
Runner.prototype.createFolder = function (name) {
try {
mkdirSync(name)
return true
} catch (err) {
return false
function clone (cb) {
console.log('Cloning elasticsearch repository...')
git.clone(esRepo, esFolder, err => {
if (err) {
return callback(err)
}
cb()
})
}
}
}

/**
* Checks if the given path exists
* @param {string} path
* @returns {boolean} true if exists, false if not
*/
pathExist (path) {
try {
accessSync(path)
return true
} catch (err) {
return false
}
}

/**
* Creates the given folder
* @param {string} name
* @returns {boolean} true on success, false on failure
*/
createFolder (name) {
try {
mkdirSync(name)
return true
} catch (err) {
return false
}
}
}

if (require.main === module) {
const url = process.env.TEST_ES_SERVER || 'http://localhost:9200'
const node = process.env.TEST_ES_SERVER || 'http://localhost:9200'
const opts = {
node: url,
isPlatinum: url.indexOf('@') > -1
node,
isXPack: node.indexOf('@') > -1
}
const runner = Runner(opts)
runner.start(opts)
const runner = new Runner(opts)
runner.start(opts).catch(console.log)
}

const shouldSkip = (t, isXPack, file, name) => {
var list = Object.keys(ossSkips)
for (var i = 0; i < list.length; i++) {
const ossTest = ossSkips[list[i]]
for (var j = 0; j < ossTest.length; j++) {
if (file.endsWith(list[i]) && (name === ossTest[j] || ossTest[j] === '*')) {
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
t.comment(`Skipping test ${testName} because is blacklisted in the oss test`)
return true
}
}
}

if (file.includes('x-pack') || isXPack) {
list = Object.keys(xPackBlackList)
for (i = 0; i < list.length; i++) {
const platTest = xPackBlackList[list[i]]
for (j = 0; j < platTest.length; j++) {
if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) {
const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name
t.comment(`Skipping test ${testName} because is blacklisted in the XPack test`)
return true
}
}
}
}

return false
}
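Both skip lists map a file suffix to an array of test names, with '*' meaning the whole file. A hypothetical lookup with the same shape (not the runner's code):

const skips = { 'nodes.stats/30_discovery.yml': ['*'] }

function isSkipped (file, name) {
  for (const suffix of Object.keys(skips)) {
    if (!file.endsWith(suffix)) continue
    if (skips[suffix].includes('*') || skips[suffix].includes(name)) {
      return true
    }
  }
  return false
}

console.log(isSkipped('/es/nodes.stats/30_discovery.yml', 'any test')) // true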

const getAllFiles = dir =>

File diff suppressed because it is too large
@@ -27,13 +27,49 @@ import {
ResurrectEvent,
events,
errors,
ClientExtendsCallbackOptions
ClientExtendsCallbackOptions,
NodeOptions
} from '../../index'

import { TransportRequestParams, TransportRequestOptions } from '../../lib/Transport'
import { URL } from 'url'

const client = new Client({ node: 'http://localhost:9200' })

const nodeOpts: NodeOptions = {
url: new URL('http://localhost:9200'),
id: 'winteriscoming',
headers: { 'foo': 'bar' },
roles: {
master: false,
data: true,
ingest: false,
ml: false
}
}

const client2 = new Client({ node: nodeOpts })

const clientBasicAuth = new Client({
node: 'http://localhost:9200',
auth: { username: 'foo', password: 'bar' }
})

const clientApiKeyString = new Client({
node: 'http://localhost:9200',
auth: { apiKey: 'foobar' }
})

const clientApiKeyObject = new Client({
node: 'http://localhost:9200',
auth: {
apiKey: {
id: 'foo',
api_key: 'bar'
}
}
})

client.on(events.RESPONSE, (err: errors.ElasticsearchClientError | null, request: RequestEvent) => {
if (err) console.log(err)
const { body, statusCode } = request
@@ -112,7 +148,7 @@ const searchParams: RequestParams.Search<SearchBody> = {
}
}

// Dewfine the interface of the search response
// Define the interface of the search response
interface SearchResponse<T> {
hits: {
hits: Array<{
@@ -121,7 +157,7 @@ interface SearchResponse<T> {
}
}

// Define the intefrace of the source object
// Define the interface of the source object
interface Source {
foo: string
}

test/unit/base-connection-pool.test.js (new file, 505 lines)
@@ -0,0 +1,505 @@
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

'use strict'

const { test } = require('tap')
const { URL } = require('url')
const BaseConnectionPool = require('../../lib/pool/BaseConnectionPool')
const Connection = require('../../lib/Connection')

test('API', t => {
t.test('addConnection', t => {
const pool = new BaseConnectionPool({ Connection })
const href = 'http://localhost:9200/'
pool.addConnection(href)
t.ok(pool.connections.find(c => c.id === href) instanceof Connection)
t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE)
t.end()
})

t.test('addConnection should throw with two connections with the same id', t => {
const pool = new BaseConnectionPool({ Connection })
const href = 'http://localhost:9200/'
pool.addConnection(href)
try {
pool.addConnection(href)
t.fail('Should throw')
} catch (err) {
t.is(err.message, `Connection with id '${href}' is already present`)
}
t.end()
})

t.test('addConnection should handle not-friendly url parameters for user and password', t => {
const pool = new BaseConnectionPool({ Connection })
const href = 'http://us"er:p@assword@localhost:9200/'
pool.addConnection(href)
const conn = pool.connections[0]
t.strictEqual(conn.url.username, 'us%22er')
t.strictEqual(conn.url.password, 'p%40assword')
t.match(conn.headers, {
authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64')
})
t.end()
})

t.test('markDead', t => {
const pool = new BaseConnectionPool({ Connection, sniffEnabled: true })
const href = 'http://localhost:9200/'
var connection = pool.addConnection(href)
t.same(pool.markDead(connection), pool)
connection = pool.connections.find(c => c.id === href)
t.strictEqual(connection.status, Connection.statuses.ALIVE)
t.end()
})

t.test('markAlive', t => {
const pool = new BaseConnectionPool({ Connection, sniffEnabled: true })
const href = 'http://localhost:9200/'
var connection = pool.addConnection(href)
t.same(pool.markAlive(connection), pool)
connection = pool.connections.find(c => c.id === href)
t.strictEqual(connection.status, Connection.statuses.ALIVE)
t.end()
})

t.test('getConnection should throw', t => {
const pool = new BaseConnectionPool({ Connection })
const href = 'http://localhost:9200/'
pool.addConnection(href)
try {
pool.getConnection()
t.fail('Should fail')
} catch (err) {
t.is(err.message, 'getConnection must be implemented')
}
t.end()
})

t.test('removeConnection', t => {
const pool = new BaseConnectionPool({ Connection })
const href = 'http://localhost:9200/'
var connection = pool.addConnection(href)
pool.removeConnection(connection)
t.strictEqual(pool.size, 0)
t.end()
})

t.test('empty', t => {
const pool = new BaseConnectionPool({ Connection })
pool.addConnection('http://localhost:9200/')
pool.addConnection('http://localhost:9201/')
pool.empty(() => {
t.strictEqual(pool.size, 0)
t.end()
})
})

t.test('urlToHost', t => {
const pool = new BaseConnectionPool({ Connection })
const url = 'http://localhost:9200'
t.deepEqual(
pool.urlToHost(url),
{ url: new URL(url) }
)
t.end()
})

t.test('nodesToHost', t => {
t.test('publish_address as ip address (IPv4)', t => {
const pool = new BaseConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: '127.0.0.1:9200'
},
roles: ['master', 'data', 'ingest']
},
a2: {
http: {
publish_address: '127.0.0.1:9201'
},
roles: ['master', 'data', 'ingest']
}
}

t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}])

t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '127.0.0.1:9200')
t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '127.0.0.1:9201')
t.end()
})

t.test('publish_address as ip address (IPv6)', t => {
const pool = new BaseConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: '[::1]:9200'
},
roles: ['master', 'data', 'ingest']
},
a2: {
http: {
publish_address: '[::1]:9201'
},
roles: ['master', 'data', 'ingest']
}
}

t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{
url: new URL('http://[::1]:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}, {
url: new URL('http://[::1]:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}])

t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, '[::1]:9200')
t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, '[::1]:9201')
t.end()
})

t.test('publish_address as host/ip (IPv4)', t => {
const pool = new BaseConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: 'example.com/127.0.0.1:9200'
},
roles: ['master', 'data', 'ingest']
},
a2: {
http: {
publish_address: 'example.com/127.0.0.1:9201'
},
roles: ['master', 'data', 'ingest']
}
}

t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{
url: new URL('http://example.com:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}, {
url: new URL('http://example.com:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}])

t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200')
t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201')
t.end()
})
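The host/ip form of publish_address ('hostname/ip:port') is resolved to the hostname plus the port. A rough sketch that mirrors the behavior these tests expect (this is not the client's implementation):

const { URL } = require('url')

function publishAddressToUrl (address, protocol) {
  if (address.includes('/')) {
    const [hostname, ipAndPort] = address.split('/')
    return new URL(`${protocol}//${hostname}:${ipAndPort.split(':').pop()}`)
  }
  return new URL(`${protocol}//${address}`)
}

console.log(publishAddressToUrl('example.com/127.0.0.1:9200', 'http:').host)
// example.com:9200
console.log(publishAddressToUrl('[::1]:9201', 'http:').host)
// [::1]:9201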

t.test('publish_address as host/ip (IPv6)', t => {
const pool = new BaseConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: 'example.com/[::1]:9200'
},
roles: ['master', 'data', 'ingest']
},
a2: {
http: {
publish_address: 'example.com/[::1]:9201'
},
roles: ['master', 'data', 'ingest']
}
}

t.deepEqual(pool.nodesToHost(nodes, 'http:'), [{
url: new URL('http://example.com:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}, {
url: new URL('http://example.com:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}])

t.strictEqual(pool.nodesToHost(nodes, 'http:')[0].url.host, 'example.com:9200')
t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.host, 'example.com:9201')
t.end()
})

t.test('Should use the configure protocol', t => {
const pool = new BaseConnectionPool({ Connection })
const nodes = {
a1: {
http: {
publish_address: 'example.com/127.0.0.1:9200'
},
roles: ['master', 'data', 'ingest']
},
a2: {
http: {
publish_address: 'example.com/127.0.0.1:9201'
},
roles: ['master', 'data', 'ingest']
}
}

t.strictEqual(pool.nodesToHost(nodes, 'https:')[0].url.protocol, 'https:')
t.strictEqual(pool.nodesToHost(nodes, 'http:')[1].url.protocol, 'http:')
t.end()
})

t.end()
})

t.test('update', t => {
t.test('Should not update existing connections', t => {
t.plan(2)
const pool = new BaseConnectionPool({ Connection })
pool.addConnection([{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true
}
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true
}
}])

pool.update([{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: null
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: null
}])

t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
})

t.test('Should not update existing connections (mark alive)', t => {
t.plan(5)
class CustomBaseConnectionPool extends BaseConnectionPool {
markAlive (connection) {
t.ok('called')
super.markAlive(connection)
}
}
const pool = new CustomBaseConnectionPool({ Connection })
const conn1 = pool.addConnection({
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true
}
})

const conn2 = pool.addConnection({
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: {
master: true,
data: true,
ingest: true
}
})

pool.markDead(conn1)
pool.markDead(conn2)

pool.update([{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: null
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: null
}])

t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
})

t.test('Should not update existing connections (same url, different id)', t => {
t.plan(3)
class CustomBaseConnectionPool extends BaseConnectionPool {
markAlive (connection) {
t.ok('called')
super.markAlive(connection)
}
}
const pool = new CustomBaseConnectionPool({ Connection })
pool.addConnection([{
url: new URL('http://127.0.0.1:9200'),
id: 'http://127.0.0.1:9200/',
roles: {
master: true,
data: true,
ingest: true
}
}])

pool.update([{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: true
}])

// roles will never be updated, we only use it to do
// a dummy check to see if the connection has been updated
t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, {
master: true,
data: true,
ingest: true,
ml: false
})
t.strictEqual(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined)
})

t.test('Add a new connection', t => {
t.plan(2)
const pool = new BaseConnectionPool({ Connection })
pool.addConnection({
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: {
master: true,
data: true,
ingest: true
}
})

pool.update([{
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: null
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a2',
roles: null
}])

t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
t.ok(pool.connections.find(c => c.id === 'a2'))
})

t.test('Remove old connections', t => {
t.plan(3)
const pool = new BaseConnectionPool({ Connection })
pool.addConnection({
url: new URL('http://127.0.0.1:9200'),
id: 'a1',
roles: null
})

pool.update([{
url: new URL('http://127.0.0.1:9200'),
id: 'a2',
roles: null
}, {
url: new URL('http://127.0.0.1:9201'),
id: 'a3',
roles: null
}])

t.false(pool.connections.find(c => c.id === 'a1'))
t.true(pool.connections.find(c => c.id === 'a2'))
t.true(pool.connections.find(c => c.id === 'a3'))
})

t.end()
})

t.test('CreateConnection', t => {
t.plan(1)
const pool = new BaseConnectionPool({ Connection })
const conn = pool.createConnection('http://localhost:9200')
pool.connections.push(conn)
try {
pool.createConnection('http://localhost:9200')
t.fail('Should throw')
} catch (err) {
t.is(err.message, 'Connection with id \'http://localhost:9200/\' is already present')
}
})

t.end()
})
@@ -22,6 +22,7 @@
const { test } = require('tap')
const { URL } = require('url')
const { Client, ConnectionPool, Transport } = require('../../index')
const { CloudConnectionPool } = require('../../lib/pool')
const { buildServer } = require('../utils')

test('Configure host', t => {
@@ -30,7 +31,7 @@ test('Configure host', t => {
node: 'http://localhost:9200'
})
const pool = client.connectionPool
t.match(pool.connections.get('http://localhost:9200/'), {
t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), {
url: new URL('http://localhost:9200'),
id: 'http://localhost:9200/',
ssl: null,
@@ -51,7 +52,7 @@ test('Configure host', t => {
nodes: ['http://localhost:9200', 'http://localhost:9201']
})
const pool = client.connectionPool
t.match(pool.connections.get('http://localhost:9200/'), {
t.match(pool.connections.find(c => c.id === 'http://localhost:9200/'), {
url: new URL('http://localhost:9200'),
id: 'http://localhost:9200/',
ssl: null,
@@ -64,7 +65,7 @@ test('Configure host', t => {
ml: false
}
})
t.match(pool.connections.get('http://localhost:9201/'), {
t.match(pool.connections.find(c => c.id === 'http://localhost:9201/'), {
url: new URL('http://localhost:9201'),
id: 'http://localhost:9201/',
ssl: null,
@@ -95,7 +96,7 @@ test('Configure host', t => {
}
})
const pool = client.connectionPool
t.match(pool.connections.get('node'), {
t.match(pool.connections.find(c => c.id === 'node'), {
url: new URL('http://localhost:9200'),
id: 'node',
ssl: 'ssl',
@@ -103,7 +104,7 @@ test('Configure host', t => {
resurrectTimeout: 0
})

t.deepEqual(pool.connections.get('node').roles, {
t.deepEqual(pool.connections.find(c => c.id === 'node').roles, {
master: true,
data: false,
ingest: false,
@@ -136,7 +137,7 @@ test('Configure host', t => {
}]
})
const pool = client.connectionPool
t.match(pool.connections.get('node1'), {
t.match(pool.connections.find(c => c.id === 'node1'), {
url: new URL('http://localhost:9200'),
id: 'node1',
ssl: 'ssl',
@@ -144,14 +145,14 @@ test('Configure host', t => {
resurrectTimeout: 0
})

t.deepEqual(pool.connections.get('node1').roles, {
t.deepEqual(pool.connections.find(c => c.id === 'node1').roles, {
master: true,
data: false,
ingest: false,
ml: false
})

t.match(pool.connections.get('node2'), {
t.match(pool.connections.find(c => c.id === 'node2'), {
url: new URL('http://localhost:9200'),
id: 'node2',
ssl: 'ssl',
@@ -159,7 +160,7 @@ test('Configure host', t => {
resurrectTimeout: 0
})

t.deepEqual(pool.connections.get('node2').roles, {
t.deepEqual(pool.connections.find(c => c.id === 'node2').roles, {
master: false,
data: true,
ingest: false,
@@ -178,7 +179,7 @@ test('Configure host', t => {
}
})
const pool = client.connectionPool
t.match(pool.connections.get('node'), {
t.match(pool.connections.find(c => c.id === 'node'), {
url: new URL('http://localhost:9200'),
headers: { 'x-foo': 'bar' }
})
@@ -198,63 +199,349 @@ test('Configure host', t => {
t.end()
})

test('Node with auth data in the url', t => {
t.plan(3)
test('Authentication', t => {
t.test('Basic', t => {
t.test('Node with basic auth data in the url', t => {
t.plan(3)

function handler (req, res) {
t.match(req.headers, {
authorization: 'Basic Zm9vOmJhcg=='
})
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}

buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://foo:bar@localhost:${port}`
})

client.info((err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
})
})
})

test('Custom authentication per request', t => {
t.plan(6)

var first = true
function handler (req, res) {
t.match(req.headers, {
authorization: first ? 'hello' : 'Basic Zm9vOmJhcg=='
})
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}

buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://foo:bar@localhost:${port}`
})

client.info({}, {
headers: {
authorization: 'hello'
function handler (req, res) {
t.match(req.headers, {
authorization: 'Basic Zm9vOmJhcg=='
})
res.setHeader('Content-Type', 'application/json;utf=8')
res.end(JSON.stringify({ hello: 'world' }))
}
}, (err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
first = false

client.info((err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
buildServer(handler, ({ port }, server) => {
const client = new Client({
node: `http://foo:bar@localhost:${port}`
})

client.info((err, { body }) => {
t.error(err)
t.deepEqual(body, { hello: 'world' })
server.stop()
})
})
})
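The expected 'Basic Zm9vOmJhcg==' header above is plain HTTP Basic auth: the scheme name plus base64 of 'username:password', which is how the foo/bar credentials in the url end up on the wire:

console.log('Basic ' + Buffer.from('foo:bar').toString('base64'))
// Basic Zm9vOmJhcg==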
|
||||
|
||||
t.test('Node with basic auth data in the url (array of nodes)', t => {
|
||||
t.plan(3)
|
||||
|
||||
function handler (req, res) {
|
||||
t.match(req.headers, {
|
||||
authorization: 'Basic Zm9vOmJhcg=='
|
||||
})
|
||||
res.setHeader('Content-Type', 'application/json;utf=8')
|
||||
res.end(JSON.stringify({ hello: 'world' }))
|
||||
}
|
||||
|
||||
buildServer(handler, ({ port }, server) => {
|
||||
const client = new Client({
|
||||
nodes: [`http://foo:bar@localhost:${port}`]
|
||||
})
|
||||
|
||||
client.info((err, { body }) => {
|
||||
t.error(err)
|
||||
t.deepEqual(body, { hello: 'world' })
|
||||
server.stop()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.test('Node with basic auth data in the options', t => {
|
||||
t.plan(3)
|
||||
|
||||
function handler (req, res) {
|
||||
t.match(req.headers, {
|
||||
authorization: 'Basic Zm9vOmJhcg=='
|
||||
})
|
||||
res.setHeader('Content-Type', 'application/json;utf=8')
|
||||
res.end(JSON.stringify({ hello: 'world' }))
|
||||
}
|
||||
|
||||
buildServer(handler, ({ port }, server) => {
|
||||
const client = new Client({
|
||||
node: `http://localhost:${port}`,
|
||||
auth: {
|
||||
username: 'foo',
|
||||
password: 'bar'
|
||||
}
|
||||
})
|
||||
|
||||
client.info((err, { body }) => {
|
||||
t.error(err)
|
||||
t.deepEqual(body, { hello: 'world' })
|
||||
server.stop()
|
||||
})
|
||||
})
|
||||
})

  t.test('Custom basic authentication per request', t => {
    t.plan(6)

    var first = true
    function handler (req, res) {
      t.match(req.headers, {
        authorization: first ? 'hello' : 'Basic Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://foo:bar@localhost:${port}`
      })

      client.info({}, {
        headers: {
          authorization: 'hello'
        }
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        first = false

        client.info((err, { body }) => {
          t.error(err)
          t.deepEqual(body, { hello: 'world' })
          server.stop()
        })
      })
    })
  })

  t.test('Override default basic authentication per request', t => {
    t.plan(6)

    var first = true
    function handler (req, res) {
      t.match(req.headers, {
        authorization: first ? 'hello' : 'Basic Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        auth: {
          username: 'foo',
          password: 'bar'
        }
      })

      client.info({}, {
        headers: {
          authorization: 'hello'
        }
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        first = false

        client.info((err, { body }) => {
          t.error(err)
          t.deepEqual(body, { hello: 'world' })
          server.stop()
        })
      })
    })
  })

  t.end()
})

t.test('ApiKey', t => {
  t.test('Node with ApiKey auth data in the options as string', t => {
    t.plan(3)

    function handler (req, res) {
      t.match(req.headers, {
        authorization: 'ApiKey Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        auth: {
          apiKey: 'Zm9vOmJhcg=='
        }
      })

      client.info((err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.test('Node with ApiKey auth data in the options as object', t => {
    t.plan(3)

    function handler (req, res) {
      t.match(req.headers, {
        authorization: 'ApiKey Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        auth: {
          apiKey: { id: 'foo', api_key: 'bar' }
        }
      })

      client.info((err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })
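
(The object form above produces the same header as the string form: per the assertion 'ApiKey Zm9vOmJhcg==', the client joins id and api_key with a colon and base64-encodes the pair. An illustrative check, not part of the diff:)

// base64('foo:bar') === 'Zm9vOmJhcg==', so { id: 'foo', api_key: 'bar' }
// and apiKey: 'Zm9vOmJhcg==' yield an identical authorization header
console.log('ApiKey ' + Buffer.from('foo:bar').toString('base64'))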

  t.test('Custom ApiKey authentication per request', t => {
    t.plan(6)

    var first = true
    function handler (req, res) {
      t.match(req.headers, {
        authorization: first ? 'ApiKey Zm9vOmJhcg==' : 'Basic Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://foo:bar@localhost:${port}`
      })

      client.info({}, {
        headers: {
          authorization: 'ApiKey Zm9vOmJhcg=='
        }
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        first = false

        client.info((err, { body }) => {
          t.error(err)
          t.deepEqual(body, { hello: 'world' })
          server.stop()
        })
      })
    })
  })

  t.test('Override default ApiKey authentication per request', t => {
    t.plan(6)

    var first = true
    function handler (req, res) {
      t.match(req.headers, {
        authorization: first ? 'hello' : 'ApiKey Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        auth: {
          apiKey: 'Zm9vOmJhcg=='
        }
      })

      client.info({}, {
        headers: {
          authorization: 'hello'
        }
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        first = false

        client.info((err, { body }) => {
          t.error(err)
          t.deepEqual(body, { hello: 'world' })
          server.stop()
        })
      })
    })
  })

  t.test('ApiKey should take precedence over basic auth (in url)', t => {
    t.plan(3)

    function handler (req, res) {
      t.match(req.headers, {
        authorization: 'ApiKey Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://user:pwd@localhost:${port}`,
        auth: {
          apiKey: 'Zm9vOmJhcg=='
        }
      })

      client.info((err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.test('ApiKey should take precedence over basic auth (in opts)', t => {
    t.plan(3)

    function handler (req, res) {
      t.match(req.headers, {
        authorization: 'ApiKey Zm9vOmJhcg=='
      })
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        auth: {
          apiKey: 'Zm9vOmJhcg==',
          username: 'user',
          password: 'pwd'
        }
      })

      client.info((err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.end()
})

  t.end()
})

test('Custom headers per request', t => {
@@ -540,7 +827,7 @@ test('Extend client APIs', t => {

test('Elastic cloud config', t => {
  t.test('Basic', t => {
    t.plan(4)
    t.plan(5)
    const client = new Client({
      cloud: {
        // 'localhost$abcd$efgh'
@@ -551,9 +838,88 @@ test('Elastic cloud config', t => {
    })

    const pool = client.connectionPool
    t.match(pool.connections.get('https://abcd.localhost/'), {
    t.ok(pool instanceof CloudConnectionPool)
    t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), {
      url: new URL('https://elastic:changeme@abcd.localhost'),
      id: 'https://abcd.localhost/',
      headers: {
        authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64')
      },
      ssl: { secureProtocol: 'TLSv1_2_method' },
      deadCount: 0,
      resurrectTimeout: 0,
      roles: {
        master: true,
        data: true,
        ingest: true,
        ml: false
      }
    })

    t.strictEqual(client.transport.compression, 'gzip')
    t.strictEqual(client.transport.suggestCompression, true)
    t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' })
  })

  t.test('Auth as separate option', t => {
    t.plan(5)
    const client = new Client({
      cloud: {
        // 'localhost$abcd$efgh'
        id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA=='
      },
      auth: {
        username: 'elastic',
        password: 'changeme'
      }
    })

    const pool = client.connectionPool
    t.ok(pool instanceof CloudConnectionPool)
    t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), {
      url: new URL('https://elastic:changeme@abcd.localhost'),
      id: 'https://abcd.localhost/',
      headers: {
        authorization: 'Basic ' + Buffer.from('elastic:changeme').toString('base64')
      },
      ssl: { secureProtocol: 'TLSv1_2_method' },
      deadCount: 0,
      resurrectTimeout: 0,
      roles: {
        master: true,
        data: true,
        ingest: true,
        ml: false
      }
    })

    t.strictEqual(client.transport.compression, 'gzip')
    t.strictEqual(client.transport.suggestCompression, true)
    t.deepEqual(pool._ssl, { secureProtocol: 'TLSv1_2_method' })
  })
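
(The cloud id used in these tests decodes to the layout hinted at by the inline comment: the part after 'name:' is base64 of 'host$es_uuid$kibana_uuid'. An illustrative check, not part of the diff:)

// decodes to 'localhost$abcd$efgh', hence the asserted
// node id 'https://abcd.localhost/'
const [host, esId] = Buffer.from('bG9jYWxob3N0JGFiY2QkZWZnaA==', 'base64')
  .toString().split('$')
console.log(`https://${esId}.${host}`) // https://abcd.localhost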

  t.test('ApiKey should take precedence over basic auth', t => {
    t.plan(5)
    const client = new Client({
      cloud: {
        // 'localhost$abcd$efgh'
        id: 'name:bG9jYWxob3N0JGFiY2QkZWZnaA=='
      },
      auth: {
        username: 'elastic',
        password: 'changeme',
        apiKey: 'Zm9vOmJhcg=='
      }
    })

    const pool = client.connectionPool
    t.ok(pool instanceof CloudConnectionPool)
    t.match(pool.connections.find(c => c.id === 'https://abcd.localhost/'), {
      url: new URL('https://elastic:changeme@abcd.localhost'),
      id: 'https://abcd.localhost/',
      headers: {
        authorization: 'ApiKey Zm9vOmJhcg=='
      },
      ssl: { secureProtocol: 'TLSv1_2_method' },
      deadCount: 0,
      resurrectTimeout: 0,
@@ -571,7 +937,7 @@ test('Elastic cloud config', t => {
  })

  t.test('Override default options', t => {
    t.plan(3)
    t.plan(4)
    const client = new Client({
      cloud: {
        // 'localhost$abcd$efgh'
@@ -586,6 +952,7 @@ test('Elastic cloud config', t => {
      }
    })

    t.ok(client.connectionPool instanceof CloudConnectionPool)
    t.strictEqual(client.transport.compression, false)
    t.strictEqual(client.transport.suggestCompression, false)
    t.deepEqual(client.connectionPool._ssl, { secureProtocol: 'TLSv1_1_method' })
@@ -593,3 +960,87 @@ test('Elastic cloud config', t => {

  t.end()
})

test('Opaque Id support', t => {
  t.test('No opaqueId', t => {
    t.plan(3)

    function handler (req, res) {
      t.strictEqual(req.headers['x-opaque-id'], undefined)
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`
      })

      client.search({
        index: 'test',
        q: 'foo:bar'
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.test('No prefix', t => {
    t.plan(3)

    function handler (req, res) {
      t.strictEqual(req.headers['x-opaque-id'], 'bar')
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`
      })

      client.search({
        index: 'test',
        q: 'foo:bar'
      }, {
        opaqueId: 'bar'
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.test('With prefix', t => {
    t.plan(3)

    function handler (req, res) {
      t.strictEqual(req.headers['x-opaque-id'], 'foo-bar')
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }

    buildServer(handler, ({ port }, server) => {
      const client = new Client({
        node: `http://localhost:${port}`,
        opaqueIdPrefix: 'foo-'
      })

      client.search({
        index: 'test',
        q: 'foo:bar'
      }, {
        opaqueId: 'bar'
      }, (err, { body }) => {
        t.error(err)
        t.deepEqual(body, { hello: 'world' })
        server.stop()
      })
    })
  })

  t.end()
})
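
(Taken together, the three cases pin down the header composition: no option means no x-opaque-id header at all, and a configured opaqueIdPrefix is simply prepended to the per-request opaqueId. Illustrative, not part of the diff:)

// 'With prefix' asserts exactly this concatenation
const opaqueIdPrefix = 'foo-'
const opaqueId = 'bar'
console.log(opaqueIdPrefix + opaqueId) // 'foo-bar', sent as x-opaque-id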

test/unit/cloud-connection-pool.test.js (new file, 48 lines)
@@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

'use strict'

const { test } = require('tap')
const { CloudConnectionPool } = require('../../lib/pool')
const Connection = require('../../lib/Connection')

test('Should expose a cloudConnection property', t => {
  const pool = new CloudConnectionPool({ Connection })
  pool.addConnection('http://localhost:9200/')
  t.ok(pool.cloudConnection instanceof Connection)
  t.end()
})

test('Get connection should always return cloudConnection', t => {
  const pool = new CloudConnectionPool({ Connection })
  const conn = pool.addConnection('http://localhost:9200/')
  t.deepEqual(pool.getConnection(), conn)
  t.end()
})

test('pool.empty should reset cloudConnection', t => {
  const pool = new CloudConnectionPool({ Connection })
  pool.addConnection('http://localhost:9200/')
  t.ok(pool.cloudConnection instanceof Connection)
  pool.empty(() => {
    t.strictEqual(pool.cloudConnection, null)
    t.end()
  })
})
@@ -21,7 +21,7 @@

const { test } = require('tap')
const { URL } = require('url')
const ConnectionPool = require('../../lib/ConnectionPool')
const ConnectionPool = require('../../lib/pool/ConnectionPool')
const Connection = require('../../lib/Connection')
const { defaultNodeFilter, roundRobinSelector } = require('../../lib/Transport').internals
const { connection: { MockConnection, MockConnectionTimeout } } = require('../utils')
@@ -31,8 +31,8 @@ test('API', t => {
    const pool = new ConnectionPool({ Connection })
    const href = 'http://localhost:9200/'
    pool.addConnection(href)
    t.ok(pool.connections.get(href) instanceof Connection)
    t.strictEqual(pool.connections.get(href).status, Connection.statuses.ALIVE)
    t.ok(pool.connections.find(c => c.id === href) instanceof Connection)
    t.strictEqual(pool.connections.find(c => c.id === href).status, Connection.statuses.ALIVE)
    t.deepEqual(pool.dead, [])
    t.end()
  })
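
(These hunks track a structural change in the pool: connections used to be a Map keyed by connection id and is now an array, so every keyed lookup in the tests migrates to a linear scan — the pattern repeated throughout the hunks below. Illustrative, not part of the diff:)

// before: Map lookup by id
// const connection = pool.connections.get(href)
// after: array scan by id
const connection = pool.connections.find(c => c.id === href)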
@@ -50,25 +50,6 @@ test('API', t => {
    t.end()
  })

  t.test('addConnection (should store the auth data)', t => {
    const pool = new ConnectionPool({ Connection })
    const href = 'http://localhost:9200/'
    pool.addConnection('http://foo:bar@localhost:9200')

    t.ok(pool.connections.get(href) instanceof Connection)
    t.strictEqual(pool.connections.get(href).status, Connection.statuses.ALIVE)
    t.deepEqual(pool.dead, [])
    t.deepEqual(pool._auth, { username: 'foo', password: 'bar' })

    pool.addConnection('http://localhost:9201')
    const conn = pool.connections.get('http://localhost:9201/')
    t.strictEqual(conn.url.username, 'foo')
    t.strictEqual(conn.url.password, 'bar')
    t.strictEqual(conn.auth.username, 'foo')
    t.strictEqual(conn.auth.password, 'bar')
    t.end()
  })

  t.test('addConnection should handle not-friendly url parameters for user and password', t => {
    const pool = new ConnectionPool({ Connection })
    const href = 'http://us"er:p@assword@localhost:9200/'
@@ -76,8 +57,9 @@ test('API', t => {
    const conn = pool.getConnection()
    t.strictEqual(conn.url.username, 'us%22er')
    t.strictEqual(conn.url.password, 'p%40assword')
    t.strictEqual(conn.auth.username, 'us"er')
    t.strictEqual(conn.auth.password, 'p@assword')
    t.match(conn.headers, {
      authorization: 'Basic ' + Buffer.from('us"er:p@assword').toString('base64')
    })
    t.end()
  })

@@ -86,7 +68,7 @@ test('API', t => {
    const href = 'http://localhost:9200/'
    var connection = pool.addConnection(href)
    pool.markDead(connection)
    connection = pool.connections.get(href)
    connection = pool.connections.find(c => c.id === href)
    t.strictEqual(connection.deadCount, 1)
    t.true(connection.resurrectTimeout > 0)
    t.deepEqual(pool.dead, [href])
@@ -107,13 +89,21 @@ test('API', t => {
    }, 10)
  })

  t.test('markDead should ignore connections that no longer exists', t => {
    const pool = new ConnectionPool({ Connection, sniffEnabled: true })
    pool.addConnection('http://localhost:9200/')
    pool.markDead({ id: 'foo-bar' })
    t.deepEqual(pool.dead, [])
    t.end()
  })

  t.test('markAlive', t => {
    const pool = new ConnectionPool({ Connection, sniffEnabled: true })
    const href = 'http://localhost:9200/'
    var connection = pool.addConnection(href)
    pool.markDead(connection)
    pool.markAlive(connection)
    connection = pool.connections.get(href)
    connection = pool.connections.find(c => c.id === href)
    t.strictEqual(connection.deadCount, 0)
    t.strictEqual(connection.resurrectTimeout, 0)
    t.strictEqual(connection.status, Connection.statuses.ALIVE)
@@ -140,7 +130,7 @@ test('API', t => {
    }
    pool.resurrect(opts, (isAlive, connection) => {
      t.true(isAlive)
      connection = pool.connections.get(connection.id)
      connection = pool.connections.find(c => c.id === connection.id)
      t.strictEqual(connection.deadCount, 0)
      t.strictEqual(connection.resurrectTimeout, 0)
      t.strictEqual(connection.status, Connection.statuses.ALIVE)
@@ -166,7 +156,7 @@ test('API', t => {
    }
    pool.resurrect(opts, (isAlive, connection) => {
      t.false(isAlive)
      connection = pool.connections.get(connection.id)
      connection = pool.connections.find(c => c.id === connection.id)
      t.strictEqual(connection.deadCount, 2)
      t.true(connection.resurrectTimeout > 0)
      t.strictEqual(connection.status, Connection.statuses.DEAD)
@@ -194,7 +184,7 @@ test('API', t => {
    }
    pool.resurrect(opts, (isAlive, connection) => {
      t.true(isAlive)
      connection = pool.connections.get(connection.id)
      connection = pool.connections.find(c => c.id === connection.id)
      t.strictEqual(connection.deadCount, 1)
      t.true(connection.resurrectTimeout > 0)
      t.strictEqual(connection.status, Connection.statuses.ALIVE)
@@ -220,7 +210,7 @@ test('API', t => {
    pool.resurrect(opts, (isAlive, connection) => {
      t.ok(isAlive === null)
      t.ok(connection === null)
      connection = pool.connections.get(href)
      connection = pool.connections.find(c => c.id === href)
      t.strictEqual(connection.deadCount, 1)
      t.true(connection.resurrectTimeout > 0)
      t.strictEqual(connection.status, Connection.statuses.DEAD)
@@ -282,6 +272,20 @@ test('API', t => {
    pool.getConnection({ filter })
  })

  t.test('If all connections are marked as dead, getConnection should return a dead connection', t => {
    const pool = new ConnectionPool({ Connection })
    const href1 = 'http://localhost:9200/'
    const href2 = 'http://localhost:9200/other'
    const conn1 = pool.addConnection(href1)
    const conn2 = pool.addConnection(href2)
    pool.markDead(conn1)
    pool.markDead(conn2)
    const conn = pool.getConnection()
    t.ok(conn instanceof Connection)
    t.is(conn.status, 'dead')
    t.end()
  })

  t.end()
})

@@ -300,7 +304,7 @@ test('API', t => {
    pool.addConnection('http://localhost:9200/')
    pool.addConnection('http://localhost:9201/')
    pool.empty(() => {
      t.strictEqual(pool.connections.size, 0)
      t.strictEqual(pool.size, 0)
      t.deepEqual(pool.dead, [])
      t.end()
    })
@@ -507,18 +511,52 @@ test('API', t => {
    t.end()
  })

  t.test('Should map roles', t => {
    const pool = new ConnectionPool({ Connection })
    const nodes = {
      a1: {
        http: {
          publish_address: 'example.com:9200'
        },
        roles: ['master', 'data', 'ingest', 'ml']
      },
      a2: {
        http: {
          publish_address: 'example.com:9201'
        },
        roles: []
      }
    }
    t.same(pool.nodesToHost(nodes, 'http:'), [{
      url: new URL('http://example.com:9200'),
      id: 'a1',
      roles: {
        master: true,
        data: true,
        ingest: true,
        ml: true
      }
    }, {
      url: new URL('http://example.com:9201'),
      id: 'a2',
      roles: {
        master: false,
        data: false,
        ingest: false,
        ml: false
      }
    }])

    t.end()
  })

  t.end()
})

t.test('update', t => {
  t.test('Should not update existing connections', t => {
    t.plan(2)
    class CustomConnectionPool extends ConnectionPool {
      markAlive () {
        t.fail('Should not be called')
      }
    }
    const pool = new CustomConnectionPool({ Connection })
    const pool = new ConnectionPool({ Connection })
    pool.addConnection([{
      url: new URL('http://127.0.0.1:9200'),
      id: 'a1',
@@ -547,12 +585,12 @@ test('API', t => {
      roles: null
    }])

    t.ok(pool.connections.get('a1').roles !== null)
    t.ok(pool.connections.get('a2').roles !== null)
    t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
    t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
  })

  t.test('Should not update existing connections (mark alive)', t => {
    t.plan(4)
    t.plan(5)
    class CustomConnectionPool extends ConnectionPool {
      markAlive (connection) {
        t.ok('called')
@@ -593,15 +631,16 @@ test('API', t => {
      roles: null
    }])

    t.ok(pool.connections.get('a1').roles !== null)
    t.ok(pool.connections.get('a2').roles !== null)
    t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
    t.ok(pool.connections.find(c => c.id === 'a2').roles !== null)
  })

  t.test('Should not update existing connections (same url, different id)', t => {
    t.plan(2)
    t.plan(3)
    class CustomConnectionPool extends ConnectionPool {
      markAlive () {
        t.fail('Should not be called')
      markAlive (connection) {
        t.ok('called')
        super.markAlive(connection)
      }
    }
    const pool = new CustomConnectionPool({ Connection })
@@ -623,13 +662,13 @@ test('API', t => {

    // roles will never be updated, we only use it to do
    // a dummy check to see if the connection has been updated
    t.deepEqual(pool.connections.get('a1').roles, {
    t.deepEqual(pool.connections.find(c => c.id === 'a1').roles, {
      master: true,
      data: true,
      ingest: true,
      ml: false
    })
    t.strictEqual(pool.connections.get('http://127.0.0.1:9200/'), undefined)
    t.strictEqual(pool.connections.find(c => c.id === 'http://127.0.0.1:9200/'), undefined)
  })

  t.test('Add a new connection', t => {
@@ -655,8 +694,8 @@ test('API', t => {
      roles: null
    }])

    t.ok(pool.connections.get('a1').roles !== null)
    t.true(pool.connections.has('a2'))
    t.ok(pool.connections.find(c => c.id === 'a1').roles !== null)
    t.ok(pool.connections.find(c => c.id === 'a2'))
  })

  t.test('Remove old connections', t => {
@@ -678,9 +717,37 @@ test('API', t => {
      roles: null
    }])

    t.false(pool.connections.has('a1'))
    t.true(pool.connections.has('a2'))
    t.true(pool.connections.has('a3'))
    t.false(pool.connections.find(c => c.id === 'a1'))
    t.true(pool.connections.find(c => c.id === 'a2'))
    t.true(pool.connections.find(c => c.id === 'a3'))
  })

  t.test('Remove old connections (markDead)', t => {
    t.plan(5)
    const pool = new ConnectionPool({ Connection, sniffEnabled: true })
    const conn = pool.addConnection({
      url: new URL('http://127.0.0.1:9200'),
      id: 'a1',
      roles: null
    })

    pool.markDead(conn)
    t.deepEqual(pool.dead, ['a1'])

    pool.update([{
      url: new URL('http://127.0.0.1:9200'),
      id: 'a2',
      roles: null
    }, {
      url: new URL('http://127.0.0.1:9201'),
      id: 'a3',
      roles: null
    }])

    t.deepEqual(pool.dead, [])
    t.false(pool.connections.find(c => c.id === 'a1'))
    t.true(pool.connections.find(c => c.id === 'a2'))
    t.true(pool.connections.find(c => c.id === 'a3'))
  })

  t.end()
@@ -732,27 +799,3 @@ test('Node filter', t => {

  t.end()
})

test('Single node behavior', t => {
  t.test('sniffing disabled (markDead and markAlive should be noop)', t => {
    t.plan(2)
    const pool = new ConnectionPool({ Connection, sniffEnabled: false })
    const conn = pool.addConnection('http://localhost:9200/')
    pool.markDead(conn)
    t.strictEqual(pool.dead.length, 0)
    pool.markAlive(conn)
    t.strictEqual(pool.dead.length, 0)
  })

  t.test('sniffing enabled (markDead and markAlive should work)', t => {
    t.plan(2)
    const pool = new ConnectionPool({ Connection, sniffEnabled: true })
    const conn = pool.addConnection('http://localhost:9200/')
    pool.markDead(conn)
    t.strictEqual(pool.dead.length, 1)
    pool.markAlive(conn)
    t.strictEqual(pool.dead.length, 0)
  })

  t.end()
})

@@ -723,11 +723,11 @@ test('setRole', t => {
  t.end()
})

test('Util.inspect Connection class should hide agent and ssl', t => {
test('Util.inspect Connection class should hide agent, ssl and auth', t => {
  t.plan(1)

  const connection = new Connection({
    url: new URL('http://localhost:9200'),
    url: new URL('http://user:password@localhost:9200'),
    id: 'node-id',
    headers: { foo: 'bar' }
  })
@@ -741,30 +741,45 @@ test('Util.inspect Connection class should hide agent and ssl', t => {
      .replace(/(\r\n|\n|\r)/gm, '')
  }

  t.strictEqual(cleanStr(inspect(connection)), cleanStr(`{ url:
    URL {
      href: 'http://localhost:9200/',
      origin: 'http://localhost:9200',
      protocol: 'http:',
      username: '',
      password: '',
      host: 'localhost:9200',
      hostname: 'localhost',
      port: '9200',
      pathname: '/',
      search: '',
      searchParams: URLSearchParams {},
      hash: '' },
  t.strictEqual(cleanStr(inspect(connection)), cleanStr(`{ url: 'http://localhost:9200/',
  id: 'node-id',
  headers: { foo: 'bar' },
  deadCount: 0,
  resurrectTimeout: 0,
  _openRequests: 0,
  status: 'alive',
  roles: { master: true, data: true, ingest: true, ml: false } }`)
  roles: { master: true, data: true, ingest: true, ml: false }}`)
  )
})

test('connection.toJSON should hide agent, ssl and auth', t => {
  t.plan(1)

  const connection = new Connection({
    url: new URL('http://user:password@localhost:9200'),
    id: 'node-id',
    headers: { foo: 'bar' }
  })

  t.deepEqual(connection.toJSON(), {
    url: 'http://localhost:9200/',
    id: 'node-id',
    headers: {
      foo: 'bar'
    },
    deadCount: 0,
    resurrectTimeout: 0,
    _openRequests: 0,
    status: 'alive',
    roles: {
      master: true,
      data: true,
      ingest: true,
      ml: false
    }
  })
})

// https://github.com/elastic/elasticsearch-js/issues/843
test('Port handling', t => {
  t.test('http 80', t => {
@@ -796,6 +811,53 @@ test('Port handling', t => {
  t.end()
})

test('Authorization header', t => {
  t.test('None', t => {
    const connection = new Connection({
      url: new URL('http://localhost:9200')
    })

    t.deepEqual(connection.headers, {})

    t.end()
  })

  t.test('Basic', t => {
    const connection = new Connection({
      url: new URL('http://localhost:9200'),
      auth: { username: 'foo', password: 'bar' }
    })

    t.deepEqual(connection.headers, { authorization: 'Basic Zm9vOmJhcg==' })

    t.end()
  })

  t.test('ApiKey (string)', t => {
    const connection = new Connection({
      url: new URL('http://localhost:9200'),
      auth: { apiKey: 'Zm9vOmJhcg==' }
    })

    t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' })

    t.end()
  })

  t.test('ApiKey (object)', t => {
    const connection = new Connection({
      url: new URL('http://localhost:9200'),
      auth: { apiKey: { id: 'foo', api_key: 'bar' } }
    })

    t.deepEqual(connection.headers, { authorization: 'ApiKey Zm9vOmJhcg==' })

    t.end()
  })

  t.end()
})

test('Should not add agent and ssl to the serialized connection', t => {
  const connection = new Connection({
    url: new URL('http://localhost:9200')
@@ -803,7 +865,7 @@ test('Should not add agent and ssl to the serialized connection', t => {

  t.strictEqual(
    JSON.stringify(connection),
    '{"url":"http://localhost:9200/","id":"http://localhost:9200/","headers":null,"deadCount":0,"resurrectTimeout":0,"_openRequests":0,"status":"alive","roles":{"master":true,"data":true,"ingest":true,"ml":false}}'
    '{"url":"http://localhost:9200/","id":"http://localhost:9200/","headers":{},"deadCount":0,"resurrectTimeout":0,"_openRequests":0,"status":"alive","roles":{"master":true,"data":true,"ingest":true,"ml":false}}'
  )

  t.end()

@@ -19,6 +19,8 @@

'use strict'

/* eslint no-prototype-builtins: 0 */

const { test } = require('tap')
const { errors } = require('../../index')


@@ -49,7 +49,6 @@ test('Should emit a request event when a request is performed', t => {
      body: '',
      querystring: 'q=foo%3Abar',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': '0'
      }
    },
@@ -89,7 +88,7 @@ test('Should emit a response event in case of a successful response', t => {
    statusCode: 200,
    headers: {
      'content-type': 'application/json;utf=8',
      'connection': 'keep-alive'
      connection: 'keep-alive'
    },
    warnings: null,
    meta: {
@@ -102,7 +101,6 @@ test('Should emit a response event in case of a successful response', t => {
      body: '',
      querystring: 'q=foo%3Abar',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': '0'
      }
    },
@@ -153,7 +151,6 @@ test('Should emit a response event with the error set', t => {
      body: '',
      querystring: 'q=foo%3Abar',
      headers: {
        'Content-Type': 'application/json',
        'Content-Length': '0'
      }
    },

@@ -21,6 +21,7 @@

const { test } = require('tap')
const { URL } = require('url')
const lolex = require('lolex')
const { createGunzip } = require('zlib')
const os = require('os')
const intoStream = require('into-stream')
@@ -38,7 +39,7 @@ const {
  ConfigurationError
} = require('../../lib/errors')

const ConnectionPool = require('../../lib/ConnectionPool')
const ConnectionPool = require('../../lib/pool/ConnectionPool')
const Connection = require('../../lib/Connection')
const Serializer = require('../../lib/Serializer')
const Transport = require('../../lib/Transport')
@@ -349,9 +350,10 @@ test('Not JSON payload from server', t => {
  })
})

test('NoLivingConnectionsError', t => {
  t.plan(1)
test('NoLivingConnectionsError (null connection)', t => {
  t.plan(3)
  const pool = new ConnectionPool({ Connection })
  pool.addConnection('http://localhost:9200')

  const transport = new Transport({
    emit: () => {},
@@ -360,7 +362,40 @@ test('NoLivingConnectionsError', t => {
    maxRetries: 3,
    requestTimeout: 30000,
    sniffInterval: false,
    sniffOnStart: false
    sniffOnStart: false,
    nodeSelector (connections) {
      t.is(connections.length, 1)
      t.true(connections[0] instanceof Connection)
      return null
    }
  })

  transport.request({
    method: 'GET',
    path: '/hello'
  }, (err, { body }) => {
    t.ok(err instanceof NoLivingConnectionsError)
  })
})

test('NoLivingConnectionsError (undefined connection)', t => {
  t.plan(3)
  const pool = new ConnectionPool({ Connection })
  pool.addConnection('http://localhost:9200')

  const transport = new Transport({
    emit: () => {},
    connectionPool: pool,
    serializer: new Serializer(),
    maxRetries: 3,
    requestTimeout: 30000,
    sniffInterval: false,
    sniffOnStart: false,
    nodeSelector (connections) {
      t.is(connections.length, 1)
      t.true(connections[0] instanceof Connection)
      return undefined
    }
  })

  transport.request({
@@ -531,9 +566,8 @@ test('Retry mechanism', t => {
    if (count > 0) {
      res.end(JSON.stringify({ hello: 'world' }))
    } else {
      setTimeout(() => {
        res.end(JSON.stringify({ hello: 'world' }))
      }, 1000)
      res.statusCode = 504
      res.end(JSON.stringify({ error: true }))
    }
    count++
  }
@@ -556,7 +590,6 @@ test('Retry mechanism', t => {
    connectionPool: pool,
    serializer: new Serializer(),
    maxRetries: 1,
    requestTimeout: 250,
    sniffInterval: false,
    sniffOnStart: false
  })
@@ -572,6 +605,51 @@ test('Retry mechanism', t => {
  })
})

test('Should not retry if the body is a stream', { skip: 'https://github.com/elastic/elasticsearch-js/pull/1143 has not been backported' }, t => {
  t.plan(2)

  var count = 0
  function handler (req, res) {
    count++
    res.setHeader('Content-Type', 'application/json;utf=8')
    res.statusCode = 504
    res.end(JSON.stringify({ error: true }))
  }

  buildServer(handler, ({ port }, server) => {
    const pool = new ConnectionPool({ Connection })
    pool.addConnection([{
      url: new URL(`http://localhost:${port}`),
      id: 'node1'
    }, {
      url: new URL(`http://localhost:${port}`),
      id: 'node2'
    }, {
      url: new URL(`http://localhost:${port}`),
      id: 'node3'
    }])

    const transport = new Transport({
      emit: () => {},
      connectionPool: pool,
      serializer: new Serializer(),
      maxRetries: 1,
      sniffInterval: false,
      sniffOnStart: false
    })

    transport.request({
      method: 'POST',
      path: '/hello',
      body: intoStream(JSON.stringify({ hello: 'world' }))
    }, (err, { body }) => {
      t.ok(err instanceof ResponseError)
      t.strictEqual(count, 1)
      server.stop()
    })
  })
})
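
(The skipped test documents why retries are unsafe for stream bodies: a stream can only be consumed once, so after the first attempt drains it, a retry would replay an empty body — hence the transport should fail fast instead. A minimal illustration, assumed example, not code from the diff:)

const intoStream = require('into-stream')
const body = intoStream(JSON.stringify({ hello: 'world' }))
body.on('data', () => {}) // the first request attempt consumes the stream
body.on('end', () => {
  // nothing left to read: a second attempt would send an empty payload
})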

test('Custom retry mechanism', t => {
  t.plan(2)

@@ -581,9 +659,8 @@ test('Custom retry mechanism', t => {
    if (count > 0) {
      res.end(JSON.stringify({ hello: 'world' }))
    } else {
      setTimeout(() => {
        res.end(JSON.stringify({ hello: 'world' }))
      }, 1000)
      res.statusCode = 504
      res.end(JSON.stringify({ error: true }))
    }
    count++
  }
@@ -606,7 +683,6 @@ test('Custom retry mechanism', t => {
    connectionPool: pool,
    serializer: new Serializer(),
    maxRetries: 0,
    requestTimeout: 250,
    sniffInterval: false,
    sniffOnStart: false
  })
@@ -893,148 +969,95 @@ test('Override requestTimeout', t => {

test('sniff', t => {
  t.test('sniffOnStart', t => {
    t.plan(3)
    t.plan(1)

    class CustomConnectionPool extends ConnectionPool {
      update () {
        t.ok('called')
        return this
      }

      nodesToHost (nodes) {
        t.ok('called')
        return []
    class MyTransport extends Transport {
      sniff (opts) {
        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_START)
      }
    }

    function handler (req, res) {
      t.strictEqual(req.url, '/sniff')
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }
    const pool = new ConnectionPool({ Connection })
    pool.addConnection('http://localhost:9200')

    buildServer(handler, ({ port }, server) => {
      const pool = new CustomConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)

      // eslint-disable-next-line
      new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 3,
        requestTimeout: 30000,
        sniffInterval: false,
        sniffOnStart: true,
        sniffEndpoint: '/sniff'
      })

      setTimeout(() => server.stop(), 100)
    // eslint-disable-next-line
    new MyTransport({
      emit: () => {},
      connectionPool: pool,
      serializer: new Serializer(),
      maxRetries: 3,
      requestTimeout: 30000,
      sniffInterval: false,
      sniffOnStart: true,
      sniffEndpoint: '/sniff'
    })
  })

  t.test('sniffOnConnectionFault', t => {
    t.plan(3)
    t.plan(2)

    class CustomConnectionPool extends ConnectionPool {
      update () {
        t.ok('called')
        return this
      }

      nodesToHost (nodes) {
        t.ok('called')
        return []
    class MyTransport extends Transport {
      sniff (opts) {
        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_ON_CONNECTION_FAULT)
      }
    }

    function handler (req, res) {
      if (req.url === '/other/sniff') {
        res.setHeader('Content-Type', 'application/json;utf=8')
        res.end(JSON.stringify({ hello: 'world' }))
      } else {
        setTimeout(() => res.end(), 1000)
      }
    }
    const pool = new ConnectionPool({ Connection: MockConnectionTimeout })
    pool.addConnection('http://localhost:9200')

    buildServer(handler, ({ port }, server) => {
      const pool = new CustomConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)
      pool.addConnection(`http://localhost:${port}/other`)
    const transport = new MyTransport({
      emit: () => {},
      connectionPool: pool,
      serializer: new Serializer(),
      maxRetries: 0,
      requestTimeout: 500,
      sniffInterval: false,
      sniffOnConnectionFault: true,
      sniffEndpoint: '/sniff'
    })

      const transport = new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 0,
        requestTimeout: 500,
        sniffInterval: false,
        sniffOnConnectionFault: true,
        sniffEndpoint: '/sniff'
      })

      transport.request({
        method: 'GET',
        path: '/'
      }, (err, { body }) => {
        t.ok(err instanceof TimeoutError)
      })

      setTimeout(() => server.stop(), 1100)
    transport.request({
      method: 'GET',
      path: '/'
    }, (err, { body }) => {
      t.ok(err instanceof TimeoutError)
    })
  })

  t.test('sniffInterval', t => {
    t.plan(9)
    t.plan(6)

    class CustomConnectionPool extends ConnectionPool {
      update () {
        return this
      }
    const clock = lolex.install({ toFake: ['Date'] })
    t.teardown(() => clock.uninstall())

      nodesToHost (nodes) {
        return []
    class MyTransport extends Transport {
      sniff (opts) {
        t.strictEqual(opts.reason, Transport.sniffReasons.SNIFF_INTERVAL)
      }
    }

    function handler (req, res) {
      // this should be called 6 times
      t.ok('called')
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end(JSON.stringify({ hello: 'world' }))
    }
    const pool = new ConnectionPool({ Connection: MockConnection })
    pool.addConnection('http://localhost:9200')

    buildServer(handler, ({ port }, server) => {
      const pool = new CustomConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)

      const transport = new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 3,
        requestTimeout: 3000,
        sniffInterval: 1,
        sniffEndpoint: '/sniff'
      })

      const params = { method: 'GET', path: '/' }
      setTimeout(() => {
        transport.request(params, t.error)
      }, 100)

      setTimeout(() => {
        transport.request(params, t.error)
      }, 200)

      setTimeout(() => {
        transport.request(params, t.error)
      }, 300)

      setTimeout(() => {
        server.stop()
      }, 400)
    const transport = new MyTransport({
      emit: () => {},
      connectionPool: pool,
      serializer: new Serializer(),
      maxRetries: 3,
      requestTimeout: 3000,
      sniffInterval: 1,
      sniffEndpoint: '/sniff'
    })

    const params = { method: 'GET', path: '/' }
    clock.tick(100)
    transport.request(params, t.error)

    clock.tick(200)
    transport.request(params, t.error)

    clock.tick(300)
    transport.request(params, t.error)
  })
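
(The rewritten sniffInterval test swaps real timers for lolex: with Date faked, the transport's interval check only sees time advance when the test calls clock.tick, so the three requests trigger sniffs deterministically instead of racing setTimeout callbacks. The pattern in isolation — illustrative, not part of the diff:)

const lolex = require('lolex')
const clock = lolex.install({ toFake: ['Date'] })
const start = Date.now()
clock.tick(100) // advance the faked clock by 100ms, no real waiting
console.log(Date.now() - start) // 100
clock.uninstall()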

  t.test('errored', t => {
@@ -1880,6 +1903,55 @@ test('Compress request', t => {
    }
  })

  t.test('Should skip the compression for empty strings/null/undefined', t => {
    t.plan(9)

    function handler (req, res) {
      t.strictEqual(req.headers['content-encoding'], undefined)
      t.strictEqual(req.headers['content-type'], undefined)
      res.end()
    }

    buildServer(handler, ({ port }, server) => {
      const pool = new ConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)

      const transport = new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 3,
        compression: 'gzip',
        requestTimeout: 30000,
        sniffInterval: false,
        sniffOnStart: false
      })

      transport.request({
        method: 'DELETE',
        path: '/hello',
        body: ''
      }, (err, { body }) => {
        t.error(err)
        transport.request({
          method: 'GET',
          path: '/hello',
          body: null
        }, (err, { body }) => {
          t.error(err)
          transport.request({
            method: 'GET',
            path: '/hello',
            body: undefined
          }, (err, { body }) => {
            t.error(err)
            server.stop()
          })
        })
      })
    })
  })

  t.end()
})

@@ -2175,3 +2247,71 @@ test('Should pass request params and options to generateRequestId', t => {

  transport.request(params, options, t.error)
})

test('Secure json parsing', t => {
  t.test('__proto__ protection', t => {
    t.plan(2)
    function handler (req, res) {
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end('{"__proto__":{"a":1}}')
    }

    buildServer(handler, ({ port }, server) => {
      const pool = new ConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)

      const transport = new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 3,
        requestTimeout: 30000,
        sniffInterval: false,
        sniffOnStart: false
      })

      transport.request({
        method: 'GET',
        path: '/hello'
      }, (err, { body }) => {
        t.true(err instanceof DeserializationError)
        t.is(err.message, 'Object contains forbidden prototype property')
        server.stop()
      })
    })
  })

  t.test('constructor protection', t => {
    t.plan(2)
    function handler (req, res) {
      res.setHeader('Content-Type', 'application/json;utf=8')
      res.end('{"constructor":{"prototype":{"bar":"baz"}}}')
    }

    buildServer(handler, ({ port }, server) => {
      const pool = new ConnectionPool({ Connection })
      pool.addConnection(`http://localhost:${port}`)

      const transport = new Transport({
        emit: () => {},
        connectionPool: pool,
        serializer: new Serializer(),
        maxRetries: 3,
        requestTimeout: 30000,
        sniffInterval: false,
        sniffOnStart: false
      })

      transport.request({
        method: 'GET',
        path: '/hello'
      }, (err, { body }) => {
        t.true(err instanceof DeserializationError)
        t.is(err.message, 'Object contains forbidden prototype property')
        server.stop()
      })
    })
  })

  t.end()
})
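
(Why the serializer rejects these payloads outright: JSON.parse creates '__proto__' as an own key, and a later naive deep merge can walk through it into Object.prototype. A minimal sketch of the attack class these tests guard against — an assumed example, not code from the diff:)

function naiveMerge (target, source) {
  for (const key in source) {
    if (typeof source[key] === 'object' && source[key] !== null) {
      // recursing into target['__proto__'] lands on Object.prototype
      target[key] = naiveMerge(target[key] || {}, source[key])
    } else {
      target[key] = source[key]
    }
  }
  return target
}

naiveMerge({}, JSON.parse('{"__proto__":{"polluted":true}}'))
console.log(({}).polluted) // true — every object in the process now inherits it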

@@ -30,8 +30,8 @@ class MockConnection extends Connection {
    stream.statusCode = setStatusCode(params.path)
    stream.headers = {
      'content-type': 'application/json;utf=8',
      'date': new Date().toISOString(),
      'connection': 'keep-alive',
      date: new Date().toISOString(),
      connection: 'keep-alive',
      'content-length': '17'
    }
    process.nextTick(() => {
@@ -96,8 +96,8 @@ class MockConnectionSniff extends Connection {
    stream.statusCode = setStatusCode(params.path)
    stream.headers = {
      'content-type': 'application/json;utf=8',
      'date': new Date().toISOString(),
      'connection': 'keep-alive',
      date: new Date().toISOString(),
      connection: 'keep-alive',
      'content-length': '205'
    }
    process.nextTick(() => {

@@ -71,14 +71,17 @@ function buildCluster (options, callback) {

  function shutdown () {
    debug(`Shutting down cluster '${clusterId}'`)
    Object.keys(nodes).forEach(kill)
    for (const id in nodes) {
      kill(id)
    }
  }

  function kill (id) {
  function kill (id, callback) {
    debug(`Shutting down cluster node '${id}' (cluster id: '${clusterId}')`)
    nodes[id].server.stop()
    const node = nodes[id]
    delete nodes[id]
    delete sniffResult.nodes[id]
    node.server.stop(callback)
  }

  function spawn (id, callback) {