I am trying to connect to my Elasticsearch cluster on Elastic Cloud, but I keep getting an `OperationalError: Error connecting to Elasticsearch`. This is the entire stack trace:
```
---------------------------------------------------------------------------
gaierror Traceback (most recent call last)
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connection.py:174, in HTTPConnection._new_conn(self)
173 try:
--> 174 conn = connection.create_connection(
175 (self._dns_host, self.port), self.timeout, **extra_kw
176 )
178 except SocketTimeout:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\util\connection.py:72, in create_connection(address, timeout, source_address, socket_options)
68 return six.raise_from(
69 LocationParseError(u"'%s', label empty or too long" % host), None
70 )
---> 72 for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
73 af, socktype, proto, canonname, sa = res
File ~\Anaconda3\envs\elasticsearch\lib\socket.py:955, in getaddrinfo(host, port, family, type, proto, flags)
954 addrlist = []
--> 955 for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
956 af, socktype, proto, canonname, sa = res
gaierror: [Errno 11001] getaddrinfo failed
During handling of the above exception, another exception occurred:
NewConnectionError Traceback (most recent call last)
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\elasticsearch\connection\http_urllib3.py:251, in Urllib3HttpConnection.perform_request(self, method, url, params, body, timeout, ignore, headers)
249 request_headers["content-encoding"] = "gzip"
--> 251 response = self.pool.urlopen(
252 method, url, body, retries=Retry(False), headers=request_headers, **kw
253 )
254 duration = time.time() - start
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connectionpool.py:787, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
785 e = ProtocolError("Connection aborted.", e)
--> 787 retries = retries.increment(
788 method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
789 )
790 retries.sleep()
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\util\retry.py:525, in Retry.increment(self, method, url, response, error, _pool, _stacktrace)
523 if self.total is False and error:
524 # Disabled, indicate to re-raise the error.
--> 525 raise six.reraise(type(error), error, _stacktrace)
527 total = self.total
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\packages\six.py:770, in reraise(tp, value, tb)
769 raise value.with_traceback(tb)
--> 770 raise value
771 finally:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connectionpool.py:703, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
702 # Make the request on the httplib connection object.
--> 703 httplib_response = self._make_request(
704 conn,
705 method,
706 url,
707 timeout=timeout_obj,
708 body=body,
709 headers=headers,
710 chunked=chunked,
711 )
713 # If we're going to release the connection in ``finally:``, then
714 # the response doesn't need to know about the connection. Otherwise
715 # it will also try to release it and we'll have a double-release
716 # mess.
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connectionpool.py:386, in HTTPConnectionPool._make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
385 try:
--> 386 self._validate_conn(conn)
387 except (SocketTimeout, BaseSSLError) as e:
388 # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connectionpool.py:1042, in HTTPSConnectionPool._validate_conn(self, conn)
1041 if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
-> 1042 conn.connect()
1044 if not conn.is_verified:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connection.py:358, in HTTPSConnection.connect(self)
356 def connect(self):
357 # Add certificate verification
--> 358 self.sock = conn = self._new_conn()
359 hostname = self.host
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\urllib3\connection.py:186, in HTTPConnection._new_conn(self)
185 except SocketError as e:
--> 186 raise NewConnectionError(
187 self, "Failed to establish a new connection: %s" % e
188 )
190 return conn
NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x00000236CFD02CE0>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed
During handling of the above exception, another exception occurred:
ConnectionError Traceback (most recent call last)
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\es\baseapi.py:318, in BaseCursor.elastic_query(self, query)
317 try:
--> 318 response = self.es.transport.perform_request("POST", path, body=payload)
319 except es_exceptions.ConnectionError:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\elasticsearch\transport.py:413, in Transport.perform_request(self, method, url, headers, params, body)
412 if attempt == self.max_retries:
--> 413 raise e
414 else:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\elasticsearch\transport.py:381, in Transport.perform_request(self, method, url, headers, params, body)
380 try:
--> 381 status, headers_response, data = connection.perform_request(
382 method,
383 url,
384 params,
385 body,
386 headers=headers,
387 ignore=ignore,
388 timeout=timeout,
389 )
391 except TransportError as e:
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\elasticsearch\connection\http_urllib3.py:266, in Urllib3HttpConnection.perform_request(self, method, url, params, body, timeout, ignore, headers)
265 raise ConnectionTimeout("TIMEOUT", str(e), e)
--> 266 raise ConnectionError("N/A", str(e), e)
268 # raise warnings if any from the 'Warnings' header.
ConnectionError: ConnectionError(<urllib3.connection.HTTPSConnection object at 0x00000236CFD02CE0>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed) caused by: NewConnectionError(<urllib3.connection.HTTPSConnection object at 0x00000236CFD02CE0>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed)
During handling of the above exception, another exception occurred:
OperationalError Traceback (most recent call last)
Input In [29], in <cell line: 2>()
1 curs = conn.cursor()
----> 2 curs.execute(
3 "select * from kibana_sample_data_ecommerce LIMIT 10"
4 )
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\es\baseapi.py:36, in check_closed.<locals>.wrap(self, *args, **kwargs)
32 if self.closed:
33 raise exceptions.Error(
34 "{klass} already closed".format(klass=self.__class__.__name__)
35 )
---> 36 return f(self, *args, **kwargs)
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\es\elastic\api.py:158, in Cursor.execute(self, operation, parameters)
155 return self.get_array_type_columns(re_table_name[1])
157 query = apply_parameters(operation, parameters)
--> 158 results = self.elastic_query(query)
159 # We need a list of tuples
160 rows = [tuple(row) for row in results.get("rows", [])]
File ~\Anaconda3\envs\elasticsearch\lib\site-packages\es\baseapi.py:320, in BaseCursor.elastic_query(self, query)
318 response = self.es.transport.perform_request("POST", path, body=payload)
319 except es_exceptions.ConnectionError:
--> 320 raise exceptions.OperationalError("Error connecting to Elasticsearch")
321 except es_exceptions.RequestError as ex:
322 raise exceptions.ProgrammingError(f"Error ({ex.error}): {ex.info}")
OperationalError: Error connecting to Elasticsearch
```
This is what my code looks like:
```python
from es.elastic.api import connect

conn = connect(
    host='https://my-deployment-191d9e.es.us-east-2.aws.elastic-cloud.com',
    scheme="https",
    user='elastic',
    password='<password>'
)
curs = conn.cursor()
curs.execute(
    "select * from kibana_sample_data_ecommerce LIMIT 10"
)
```
I have a feeling that there is something wrong with my host URL. I was able to establish a connection earlier using the `elasticsearch` package, but that was using the Cloud ID for the cluster.
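If the problem is that the scheme should not be part of `host`, I assume the call would need to look more like the sketch below, with the bare hostname and an explicit port. The port value is a guess on my part (9243 is the usual Elastic Cloud HTTPS endpoint; 443 may also work):

```python
from es.elastic.api import connect

# Sketch only: hostname without the "https://" prefix, scheme and port passed
# separately. port=9243 is an assumption based on the typical Elastic Cloud
# HTTPS endpoint, not something I have confirmed for this deployment.
conn = connect(
    host='my-deployment-191d9e.es.us-east-2.aws.elastic-cloud.com',
    port=9243,
    scheme="https",
    user='elastic',
    password='<password>',
)

curs = conn.cursor()
curs.execute("select * from kibana_sample_data_ecommerce LIMIT 10")
```

For reference, the earlier connection that did work with the `elasticsearch` package was along these lines (the Cloud ID and password are redacted placeholders):

```python
from elasticsearch import Elasticsearch

# Placeholders: the real cloud_id and credentials are omitted here.
es = Elasticsearch(
    cloud_id='<cloud-id>',
    http_auth=('elastic', '<password>'),
)
print(es.info())
```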
Any help on this would be greatly appreciated.