Asynchronous refactoring pushed as main version 💯

parent ed547a27f4
commit a4b89e6e5a

eris.py (205 changed lines)
@@ -1,18 +1,21 @@
#!/usr/bin/env python
# Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
+# eris.py

+import asyncio
import argparse
import logging
import os
import stat
-import time
import sys

sys.dont_write_bytecode = True

try:
-    from elasticsearch import Elasticsearch, helpers
+    # This is commented out because there is a bug with the elasticsearch library that requires a patch (see initialize() method below)
+    #from elasticsearch import AsyncElasticsearch
    from elasticsearch.exceptions import NotFoundError
+    from elasticsearch.helpers import async_streaming_bulk
except ImportError:
    raise ImportError('Missing required \'elasticsearch\' library. (pip install elasticsearch)')

@@ -28,48 +31,51 @@ class ElasticIndexer:
        :param args: Parsed arguments from argparse
        '''

        self.chunk_max = args.chunk_max * 1024 * 1024 # MB
        self.chunk_size = args.chunk_size
-        self.chunk_threads = args.chunk_threads
-        self.dry_run = args.dry_run
+        self.es = None
        self.es_index = args.index

-        if not args.dry_run:
-            es_config = {
-                'hosts': [f'{args.host}:{args.port}'],
-                'verify_certs': args.self_signed,
-                'ssl_show_warn': args.self_signed,
-                'request_timeout': args.timeout,
-                'max_retries': args.retries,
-                'retry_on_timeout': True,
-                'sniff_on_start': False,
-                'sniff_on_node_failure': True,
-                'min_delay_between_sniffing': 60 # Add config option for this?
-            }
+        self.es_config = {
+            'hosts': [f'{args.host}:{args.port}'],
+            'verify_certs': args.self_signed,
+            'ssl_show_warn': args.self_signed,
+            'request_timeout': args.timeout,
+            'max_retries': args.retries,
+            'retry_on_timeout': True,
+            'sniff_on_start': True, # Is this problematic?
+            'sniff_on_node_failure': True,
+            'min_delay_between_sniffing': 60 # Add config option for this?
+        }

-            if args.api_key:
-                es_config['headers'] = {'Authorization': f'ApiKey {args.api_key}'}
-            else:
-                es_config['basic_auth'] = (args.user, args.password)
+        if args.api_key:
+            self.es_config['api_key'] = (args.api_key, '') # Verify this is correct
+        else:
+            self.es_config['basic_auth'] = (args.user, args.password)

-            # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960)
-            import sniff_patch
-            self.es = sniff_patch.init_elasticsearch(**es_config)
-
-            # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client:
-            #self.es = Elasticsearch(**es_config)
+    async def initialize(self):
+        '''Initialize the Elasticsearch client.'''
+
+        # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960)
+        import sniff_patch
+        self.es = sniff_patch.init_elasticsearch(**self.es_config)
+
+        # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client:
+        #self.es = AsyncElasticsearch(**es_config)


-    def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1, ):
+    async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1):
        '''
        Create the Elasticsearch index with the defined mapping.

-        :param pipline: Name of the ingest pipeline to use for the index
+        :param map_body: Mapping for the index
+        :param pipeline: Name of the ingest pipeline to use for the index
        :param replicas: Number of replicas for the index
        :param shards: Number of shards for the index
        '''

-        if self.es.indices.exists(index=self.es_index):
+        if await self.es.indices.exists(index=self.es_index):
            logging.info(f'Index \'{self.es_index}\' already exists.')
            return

@@ -82,13 +88,13 @@ class ElasticIndexer:

        if pipeline:
            try:
-                self.es.ingest.get_pipeline(id=pipeline)
+                await self.es.ingest.get_pipeline(id=pipeline)
                logging.info(f'Using ingest pipeline \'{pipeline}\' for index \'{self.es_index}\'')
                mapping['settings']['index.default_pipeline'] = pipeline
            except NotFoundError:
                raise ValueError(f'Ingest pipeline \'{pipeline}\' does not exist.')

-        response = self.es.indices.create(index=self.es_index, body=mapping)
+        response = await self.es.indices.create(index=self.es_index, body=mapping)

        if response.get('acknowledged') and response.get('shards_acknowledged'):
            logging.info(f'Index \'{self.es_index}\' successfully created.')

@@ -96,106 +102,62 @@ class ElasticIndexer:
            raise Exception(f'Failed to create index. ({response})')


-    def get_cluster_health(self) -> dict:
+    async def get_cluster_health(self) -> dict:
        '''Get the health of the Elasticsearch cluster.'''

-        return self.es.cluster.health()
+        return await self.es.cluster.health()


-    def get_cluster_size(self) -> int:
+    async def get_cluster_size(self) -> int:
        '''Get the number of nodes in the Elasticsearch cluster.'''

-        cluster_stats = self.es.cluster.stats()
+        cluster_stats = await self.es.cluster.stats()
        number_of_nodes = cluster_stats['nodes']['count']['total']

        return number_of_nodes


-    def bulk_index(self, documents: list, file_path: str, count: int):
+    async def process_data(self, file_path: str, data_generator: callable):
        '''
-        Index a batch of documents to Elasticsearch.
+        Index records in chunks to Elasticsearch.

-        :param documents: List of documents to index
-        :param file_path: Path to the file being indexed
-        :param count: Total number of records processed
-        '''
-
-        remaining_documents = documents
-
-        parallel_bulk_config = {
-            'client': self.es,
-            'chunk_size': self.chunk_size,
-            'max_chunk_bytes': self.chunk_max,
-            'thread_count': self.chunk_threads,
-            'queue_size': 2 # Add config option for this?
-        }
-
-        while remaining_documents:
-            failed_documents = []
-
-            try:
-                for success, response in helpers.parallel_bulk(actions=remaining_documents, **parallel_bulk_config):
-                    if not success:
-                        failed_documents.append(response)
-
-                if not failed_documents:
-                    ingested = parallel_bulk_config['chunk_size'] * parallel_bulk_config['thread_count']
-                    logging.info(f'Successfully indexed {ingested:,} ({count:,} processed) records to {self.es_index} from {file_path}')
-                    break
-
-                else:
-                    logging.warning(f'Failed to index {len(failed_documents):,} failed documents! Retrying...')
-                    remaining_documents = failed_documents
-            except Exception as e:
-                logging.error(f'Failed to index documents! ({e})')
-                time.sleep(30) # Should we add a config option for this?
-
-
-    def process_file(self, file_path: str, batch_size: int, ingest_function: callable):
-        '''
-        Read and index records in batches to Elasticsearch.
-
        :param file_path: Path to the file
-        :param batch_size: Number of records to index per batch
-        :param ingest_function: Function to process the file
+        :param index_name: Name of the index
+        :param data_generator: Generator for the records to index
        '''

        count = 0
-        records = []
+        total = 0

+        async for ok, result in async_streaming_bulk(self.es, actions=data_generator(file_path), chunk_size=self.chunk_size, max_chunk_bytes=self.chunk_max):
+            action, result = result.popitem()

-        for processed in ingest_function(file_path):
+            if not ok:
+                logging.error(f'Failed to index document ({result["_id"]}) to {self.es_index} from {file_path} ({result})')
+                input('Press Enter to continue...') # Debugging (will possibly remove this since we have retries enabled)
-            if not processed:
-                break
-
-            if self.dry_run:
-                print(processed)
                continue

-            struct = {'_index': self.es_index, '_source': processed}
-            records.append(struct)
            count += 1
+            total += 1

-            if len(records) >= batch_size:
-                self.bulk_index(records, file_path, count)
-                records = []
-
-        if records:
-            self.bulk_index(records, file_path, count)
+            if count == self.chunk_size:
+                logging.info(f'Successfully indexed {self.chunk_size:,} ({total:,} processed) records to {self.es_index} from {file_path}')
+                count = 0
+
+        logging.info(f'Finished indexing {total:,} records to {self.es_index} from {file_path}')


-def main():
+async def main():
    '''Main function when running this script directly.'''

    parser = argparse.ArgumentParser(description='Index data into Elasticsearch.')

    # General arguments
    parser.add_argument('input_path', help='Path to the input file or directory') # Required
-    parser.add_argument('--dry-run', action='store_true', help='Dry run (do not index records to Elasticsearch)')
    parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing')

    # Elasticsearch arguments
-    parser.add_argument('--host', default='localhost', help='Elasticsearch host')
+    parser.add_argument('--host', default='http://localhost/', help='Elasticsearch host')
    parser.add_argument('--port', type=int, default=9200, help='Elasticsearch port')
    parser.add_argument('--user', default='elastic', help='Elasticsearch username')
    parser.add_argument('--password', default=os.getenv('ES_PASSWORD'), help='Elasticsearch password (if not provided, check environment variable ES_PASSWORD)')

@@ -206,16 +168,16 @@ def main():
    parser.add_argument('--index', help='Elasticsearch index name')
    parser.add_argument('--pipeline', help='Use an ingest pipeline for the index')
    parser.add_argument('--replicas', type=int, default=1, help='Number of replicas for the index')
-    parser.add_argument('--shards', type=int, default=3, help='Number of shards for the index')
+    parser.add_argument('--shards', type=int, default=1, help='Number of shards for the index')

    # Performance arguments
-    parser.add_argument('--chunk-max', type=int, default=10, help='Maximum size in MB of a chunk')
    parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk')
-    parser.add_argument('--chunk-threads', type=int, default=3, help='Number of threads to use when indexing in chunks')
-    parser.add_argument('--retries', type=int, default=60, help='Number of times to retry indexing a chunk before failing')
-    parser.add_argument('--timeout', type=int, default=30, help='Number of seconds to wait before retrying a chunk')
+    parser.add_argument('--chunk-max', type=int, default=100, help='Maximum size of a chunk in bytes')
+    parser.add_argument('--retries', type=int, default=100, help='Number of times to retry indexing a chunk before failing')
+    parser.add_argument('--timeout', type=int, default=60, help='Number of seconds to wait before retrying a chunk')

    # Ingestion arguments
+    parser.add_argument('--cert', action='store_true', help='Index Certstream records')
    parser.add_argument('--httpx', action='store_true', help='Index Httpx records')
    parser.add_argument('--masscan', action='store_true', help='Index Masscan records')
    parser.add_argument('--massdns', action='store_true', help='Index Massdns records')

@@ -232,7 +194,10 @@ def main():
    raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory')

    edx = ElasticIndexer(args)
+    await edx.initialize() # Initialize the Elasticsearch client asyncronously

+    if args.cert:
+        from ingestors import ingest_certs as ingestor
    if args.httpx:
        from ingestors import ingest_httpx as ingestor
    elif args.masscan:

@@ -241,32 +206,28 @@ def main():
        from ingestors import ingest_massdns as ingestor
    elif args.zone:
        from ingestors import ingest_zone as ingestor

-    batch_size = 0
-
-    if not args.dry_run:
-        print(edx.get_cluster_health())
+    health = await edx.get_cluster_health()
+    print(health)

-        time.sleep(3) # Delay to allow time for sniffing to complete
+    await asyncio.sleep(5) # Delay to allow time for sniffing to complete

-        nodes = edx.get_cluster_size()
+    nodes = await edx.get_cluster_size()
    logging.info(f'Connected to {nodes:,} Elasticsearch node(s)')

    if not edx.es_index:
        edx.es_index = ingestor.default_index

    map_body = ingestor.construct_map()
-    edx.create_index(map_body, args.pipeline, args.replicas, args.shards)
-
-    batch_size = int(nodes * (args.chunk_size * args.chunk_threads))
+    await edx.create_index(map_body, args.pipeline, args.replicas, args.shards)

    if os.path.isfile(args.input_path):
        logging.info(f'Processing file: {args.input_path}')
-        edx.process_file(args.input_path, batch_size, ingestor.process_file)
+        await edx.process_data(args.input_path, ingestor.process_data)

    elif stat.S_ISFIFO(os.stat(args.input_path).st_mode):
        logging.info(f'Watching FIFO: {args.input_path}')
-        edx.process_file(args.input_path, batch_size, ingestor.process_file)
+        await edx.process_data(args.input_path, ingestor.process_data)

    elif os.path.isdir(args.input_path):
        count = 1

@@ -276,7 +237,7 @@ def main():
            file_path = os.path.join(args.input_path, file)
            if os.path.isfile(file_path):
                logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}')
-                edx.process_file(file_path, batch_size, ingestor.process_file)
+                await edx.process_data(file_path, ingestor.process_data)
                count += 1
            else:
                logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}')

@@ -284,4 +245,4 @@ def main():


if __name__ == '__main__':
-    main()
+    asyncio.run(main())
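For orientation, the core pattern this refactor moves to is: each ingestor exposes an async generator that yields ready-made bulk actions, and process_data() feeds that generator straight into async_streaming_bulk(). Below is a minimal, self-contained sketch of that pattern; the connection settings, the index name, and the example_docs() generator are illustrative placeholders, not part of this commit.

import asyncio

from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk

async def example_docs(index: str):
    # Stand-in for an ingestor's process_data() generator: yield pre-built bulk actions.
    for i in range(3):
        yield {'_index': index, '_source': {'example_field': i}}

async def demo():
    # Placeholder connection settings; ERIS builds the real ones from argparse in ElasticIndexer.es_config.
    es = AsyncElasticsearch(hosts=['https://localhost:9200'], basic_auth=('elastic', 'changeme'), verify_certs=False)

    # raise_on_error=False so failed documents surface through the (ok, result) tuples instead of raising.
    async for ok, result in async_streaming_bulk(es, actions=example_docs('example-index'), chunk_size=500, raise_on_error=False):
        action, item = result.popitem()
        if not ok:
            print(f'Failed to index document: {item}')

    await es.close()

if __name__ == '__main__':
    asyncio.run(demo())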
ingestors/ingest_httpx.py

@@ -4,6 +4,11 @@

import json

+try:
+    import aiofiles
+except ImportError:
+    raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
+
default_index = 'httpx-logs'

def construct_map() -> dict:

@@ -22,15 +27,15 @@ def construct_map() -> dict:
    return mapping


-def process_file(file_path: str):
+async def process_data(file_path: str):
    '''
    Read and process HTTPX records from the log file.

    :param file_path: Path to the HTTPX log file
    '''

-    with open(file_path, 'r') as file:
-        for line in file:
+    async with aiofiles.open(file_path, mode='r') as input_file:
+        async for line in input_file:
            line = line.strip()

            if not line:

@@ -43,7 +48,7 @@ def process_file(file_path: str):

            del record['failed'], record['knowledgebase'], record['time']

-            yield record
+            yield {'_index': default_index, '_source': record}

    return None # EOF
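Every ingestor touched by this commit follows the same aiofiles pattern shown above: open the log asynchronously, iterate lines with async for, and yield one bulk action per record. A stripped-down sketch of that shape follows; the function name and the generic JSON handling are illustrative, not the httpx-specific filtering above.

import json

import aiofiles

async def iter_json_lines(file_path: str, index: str):
    # Illustrative generator: one JSON object per line becomes one bulk action.
    async with aiofiles.open(file_path, mode='r') as input_file:
        async for line in input_file:
            line = line.strip()

            if not line:
                continue

            record = json.loads(line)

            yield {'_index': index, '_source': record}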
ingestors/ingest_masscan.py

@@ -19,6 +19,11 @@ import logging
import re
import time

+try:
+    import aiofiles
+except ImportError:
+    raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
+
default_index = 'masscan-logs'

def construct_map() -> dict:

@@ -54,15 +59,15 @@ def construct_map() -> dict:
    return mapping


-def process_file(file_path: str):
+async def process_data(file_path: str):
    '''
    Read and process Masscan records from the log file.

    :param file_path: Path to the Masscan log file
    '''

-    with open(file_path, 'r') as file:
-        for line in file:
+    async with aiofiles.open(file_path, mode='r') as input_file:
+        async for line in input_file:
            line = line.strip()

            if not line or not line.startswith('{'):

@@ -74,22 +79,29 @@ def process_file(file_path: str):
            try:
                record = json.loads(line)
            except json.decoder.JSONDecodeError:
+                # In rare cases, the JSON record may be incomplete or malformed:
+                # { "ip": "51.161.12.223", "timestamp": "1707628302", "ports": [ {"port": 22, "proto": "tcp", "service": {"name": "ssh", "banner":
+                # { "ip": "83.66.211.246", "timestamp": "1706557002"
                logging.error(f'Failed to parse JSON record! ({line})')
-                input('Press Enter to continue...') # Debugging
+                input('Press Enter to continue...') # Pause for review & debugging (Will remove pausing in production, still investigating the cause of this issue.)
                continue

+            if len(record['ports']) > 1:
+                logging.warning(f'Multiple ports found for record! ({record})')
+                input('Press Enter to continue...') # Pause for review (Will remove pausing in production, still investigating if you ever seen more than one port in a record.)
+
            for port_info in record['ports']:
                struct = {
-                    'ip': record['ip'],
-                    'port': port_info['port'],
-                    'proto': port_info['proto'],
-                    'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))),
+                    'ip' : record['ip'],
+                    'port' : port_info['port'],
+                    'proto' : port_info['proto'],
+                    'seen' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))),
                }

                if 'service' in port_info:
                    if 'name' in port_info['service']:
-                        if port_info['service']['name'] != 'unknown':
-                            struct['service'] = port_info['service']['name']
+                        if (service_name := port_info['service']['name']) not in ('unknown',''):
+                            struct['service'] = service_name

                    if 'banner' in port_info['service']:
                        banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace

@@ -100,7 +112,7 @@ def process_file(file_path: str):
                    else:
                        struct['banner'] = banner

-                yield struct
+                yield {'_index': default_index, '_source': struct}

    return None # EOF

@@ -131,6 +143,6 @@ Will be indexed as:
    "service": "ssh",
    "banner": "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4",
    "seen": "2021-10-08T02:04:28Z",
-    "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload (Do we need this?)
+    "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload, Might be useful..
}
'''
|
|||||||
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
try:
|
||||||
|
import aiofiles
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
|
||||||
|
|
||||||
default_index = 'ptr-records'
|
default_index = 'ptr-records'
|
||||||
|
|
||||||
def construct_map() -> dict:
|
def construct_map() -> dict:
|
||||||
@ -25,15 +30,15 @@ def construct_map() -> dict:
|
|||||||
return mapping
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
def process_file(file_path: str):
|
async def process_data(file_path: str):
|
||||||
'''
|
'''
|
||||||
Read and process Massdns records from the log file.
|
Read and process Massdns records from the log file.
|
||||||
|
|
||||||
:param file_path: Path to the Massdns log file
|
:param file_path: Path to the Massdns log file
|
||||||
'''
|
'''
|
||||||
|
|
||||||
with open(file_path, 'r') as file:
|
async with aiofiles.open(file_path, mode='r') as input_file:
|
||||||
for line in file:
|
async for line in input_file:
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
|
||||||
if not line:
|
if not line:
|
||||||
@ -65,7 +70,7 @@ def process_file(file_path: str):
|
|||||||
'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
|
'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
|
||||||
}
|
}
|
||||||
|
|
||||||
yield struct
|
yield {'_index': default_index, '_source': struct}
|
||||||
|
|
||||||
return None # EOF
|
return None # EOF
|
||||||
|
|
||||||
|
ingestors/ingest_zone.py

@@ -4,6 +4,11 @@

import time

+try:
+    import aiofiles
+except ImportError:
+    raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
+
default_index = 'dns-zones'
record_types = ('a','aaaa','caa','cdnskey','cds','cname','dnskey','ds','mx','naptr','ns','nsec','nsec3','nsec3param','ptr','rrsig','rp','sshfp','soa','srv','txt','type65534')

@@ -42,7 +47,7 @@ def construct_map() -> dict:
    return mapping


-def process_file(file_path: str):
+async def process_data(file_path: str):
    '''
    Read and process zone file records.

@@ -52,8 +57,8 @@ def process_file(file_path: str):
    domain_records = {}
    last_domain = None

-    with open(file_path, 'r') as file:
-        for line in file:
+    async with aiofiles.open(file_path, mode='r') as input_file:
+        async for line in input_file:
            line = line.strip()

            if not line or line.startswith(';'):

@@ -88,11 +93,11 @@ def process_file(file_path: str):

            if domain != last_domain:
                if last_domain:
-                    source = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}
+                    struct = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}

                    del domain_records[last_domain]

-                    yield source
+                    yield {'_index': default_index, '_source': struct}

                last_domain = domain
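With this change the zone ingestor yields the same action shape as the other ingestors. A hypothetical example of one yielded action follows; the domain and the inner layout of 'records' are illustrative only, since the grouping logic lives outside the hunks shown above.

example_action = {
    '_index': 'dns-zones',
    '_source': {
        'domain': 'example.com',
        'records': {'a': ['…'], 'ns': ['…']},  # per-type grouping assumed for illustration
        'seen': '2024-02-11T05:11:42Z'
    }
}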
@ -1,21 +1,18 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
|
# Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
|
||||||
# eris.py
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
import argparse
|
import argparse
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import stat
|
import stat
|
||||||
|
import time
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
sys.dont_write_bytecode = True
|
sys.dont_write_bytecode = True
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# This is commented out because there is a bug with the elasticsearch library that requires a patch (see initialize() method below)
|
from elasticsearch import Elasticsearch, helpers
|
||||||
#from elasticsearch import AsyncElasticsearch
|
|
||||||
from elasticsearch.exceptions import NotFoundError
|
from elasticsearch.exceptions import NotFoundError
|
||||||
from elasticsearch.helpers import async_streaming_bulk
|
|
||||||
except ImportError:
|
except ImportError:
|
||||||
raise ImportError('Missing required \'elasticsearch\' library. (pip install elasticsearch)')
|
raise ImportError('Missing required \'elasticsearch\' library. (pip install elasticsearch)')
|
||||||
|
|
||||||
@ -31,51 +28,48 @@ class ElasticIndexer:
|
|||||||
:param args: Parsed arguments from argparse
|
:param args: Parsed arguments from argparse
|
||||||
'''
|
'''
|
||||||
|
|
||||||
self.chunk_max = args.chunk_max * 1024 * 1024 # MB
|
self.chunk_max = args.chunk_max * 1024 * 1024 # MB
|
||||||
self.chunk_size = args.chunk_size
|
self.chunk_size = args.chunk_size
|
||||||
self.es = None
|
self.chunk_threads = args.chunk_threads
|
||||||
|
self.dry_run = args.dry_run
|
||||||
self.es_index = args.index
|
self.es_index = args.index
|
||||||
|
|
||||||
self.es_config = {
|
if not args.dry_run:
|
||||||
'hosts': [f'{args.host}:{args.port}'],
|
es_config = {
|
||||||
'verify_certs': args.self_signed,
|
'hosts': [f'{args.host}:{args.port}'],
|
||||||
'ssl_show_warn': args.self_signed,
|
'verify_certs': args.self_signed,
|
||||||
'request_timeout': args.timeout,
|
'ssl_show_warn': args.self_signed,
|
||||||
'max_retries': args.retries,
|
'request_timeout': args.timeout,
|
||||||
'retry_on_timeout': True,
|
'max_retries': args.retries,
|
||||||
'sniff_on_start': True, # Is this problematic?
|
'retry_on_timeout': True,
|
||||||
'sniff_on_node_failure': True,
|
'sniff_on_start': False,
|
||||||
'min_delay_between_sniffing': 60 # Add config option for this?
|
'sniff_on_node_failure': True,
|
||||||
}
|
'min_delay_between_sniffing': 60 # Add config option for this?
|
||||||
|
}
|
||||||
|
|
||||||
if args.api_key:
|
if args.api_key:
|
||||||
self.es_config['api_key'] = (args.api_key, '') # Verify this is correct
|
es_config['headers'] = {'Authorization': f'ApiKey {args.api_key}'}
|
||||||
else:
|
else:
|
||||||
self.es_config['basic_auth'] = (args.user, args.password)
|
es_config['basic_auth'] = (args.user, args.password)
|
||||||
|
|
||||||
|
# Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960)
|
||||||
async def initialize(self):
|
import sniff_patch
|
||||||
'''Initialize the Elasticsearch client.'''
|
self.es = sniff_patch.init_elasticsearch(**es_config)
|
||||||
|
|
||||||
# Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960)
|
# Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client:
|
||||||
import sniff_patch
|
#self.es = Elasticsearch(**es_config)
|
||||||
self.es = sniff_patch.init_elasticsearch(**self.es_config)
|
|
||||||
|
|
||||||
# Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client:
|
|
||||||
#self.es = AsyncElasticsearch(**es_config)
|
|
||||||
|
|
||||||
|
|
||||||
async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1):
|
def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1, ):
|
||||||
'''
|
'''
|
||||||
Create the Elasticsearch index with the defined mapping.
|
Create the Elasticsearch index with the defined mapping.
|
||||||
|
|
||||||
:param map_body: Mapping for the index
|
:param pipline: Name of the ingest pipeline to use for the index
|
||||||
:param pipeline: Name of the ingest pipeline to use for the index
|
|
||||||
:param replicas: Number of replicas for the index
|
:param replicas: Number of replicas for the index
|
||||||
:param shards: Number of shards for the index
|
:param shards: Number of shards for the index
|
||||||
'''
|
'''
|
||||||
|
|
||||||
if await self.es.indices.exists(index=self.es_index):
|
if self.es.indices.exists(index=self.es_index):
|
||||||
logging.info(f'Index \'{self.es_index}\' already exists.')
|
logging.info(f'Index \'{self.es_index}\' already exists.')
|
||||||
return
|
return
|
||||||
|
|
||||||
@ -88,13 +82,13 @@ class ElasticIndexer:
|
|||||||
|
|
||||||
if pipeline:
|
if pipeline:
|
||||||
try:
|
try:
|
||||||
await self.es.ingest.get_pipeline(id=pipeline)
|
self.es.ingest.get_pipeline(id=pipeline)
|
||||||
logging.info(f'Using ingest pipeline \'{pipeline}\' for index \'{self.es_index}\'')
|
logging.info(f'Using ingest pipeline \'{pipeline}\' for index \'{self.es_index}\'')
|
||||||
mapping['settings']['index.default_pipeline'] = pipeline
|
mapping['settings']['index.default_pipeline'] = pipeline
|
||||||
except NotFoundError:
|
except NotFoundError:
|
||||||
raise ValueError(f'Ingest pipeline \'{pipeline}\' does not exist.')
|
raise ValueError(f'Ingest pipeline \'{pipeline}\' does not exist.')
|
||||||
|
|
||||||
response = await self.es.indices.create(index=self.es_index, body=mapping)
|
response = self.es.indices.create(index=self.es_index, body=mapping)
|
||||||
|
|
||||||
if response.get('acknowledged') and response.get('shards_acknowledged'):
|
if response.get('acknowledged') and response.get('shards_acknowledged'):
|
||||||
logging.info(f'Index \'{self.es_index}\' successfully created.')
|
logging.info(f'Index \'{self.es_index}\' successfully created.')
|
||||||
@ -102,62 +96,106 @@ class ElasticIndexer:
|
|||||||
raise Exception(f'Failed to create index. ({response})')
|
raise Exception(f'Failed to create index. ({response})')
|
||||||
|
|
||||||
|
|
||||||
async def get_cluster_health(self) -> dict:
|
def get_cluster_health(self) -> dict:
|
||||||
'''Get the health of the Elasticsearch cluster.'''
|
'''Get the health of the Elasticsearch cluster.'''
|
||||||
|
|
||||||
return await self.es.cluster.health()
|
return self.es.cluster.health()
|
||||||
|
|
||||||
|
|
||||||
async def get_cluster_size(self) -> int:
|
def get_cluster_size(self) -> int:
|
||||||
'''Get the number of nodes in the Elasticsearch cluster.'''
|
'''Get the number of nodes in the Elasticsearch cluster.'''
|
||||||
|
|
||||||
cluster_stats = await self.es.cluster.stats()
|
cluster_stats = self.es.cluster.stats()
|
||||||
number_of_nodes = cluster_stats['nodes']['count']['total']
|
number_of_nodes = cluster_stats['nodes']['count']['total']
|
||||||
|
|
||||||
return number_of_nodes
|
return number_of_nodes
|
||||||
|
|
||||||
|
|
||||||
async def process_data(self, file_path: str, data_generator: callable):
|
def bulk_index(self, documents: list, file_path: str, count: int):
|
||||||
'''
|
'''
|
||||||
Index records in chunks to Elasticsearch.
|
Index a batch of documents to Elasticsearch.
|
||||||
|
|
||||||
|
:param documents: List of documents to index
|
||||||
|
:param file_path: Path to the file being indexed
|
||||||
|
:param count: Total number of records processed
|
||||||
|
'''
|
||||||
|
|
||||||
|
remaining_documents = documents
|
||||||
|
|
||||||
|
parallel_bulk_config = {
|
||||||
|
'client': self.es,
|
||||||
|
'chunk_size': self.chunk_size,
|
||||||
|
'max_chunk_bytes': self.chunk_max,
|
||||||
|
'thread_count': self.chunk_threads,
|
||||||
|
'queue_size': 2 # Add config option for this?
|
||||||
|
}
|
||||||
|
|
||||||
|
while remaining_documents:
|
||||||
|
failed_documents = []
|
||||||
|
|
||||||
|
try:
|
||||||
|
for success, response in helpers.parallel_bulk(actions=remaining_documents, **parallel_bulk_config):
|
||||||
|
if not success:
|
||||||
|
failed_documents.append(response)
|
||||||
|
|
||||||
|
if not failed_documents:
|
||||||
|
ingested = parallel_bulk_config['chunk_size'] * parallel_bulk_config['thread_count']
|
||||||
|
logging.info(f'Successfully indexed {ingested:,} ({count:,} processed) records to {self.es_index} from {file_path}')
|
||||||
|
break
|
||||||
|
|
||||||
|
else:
|
||||||
|
logging.warning(f'Failed to index {len(failed_documents):,} failed documents! Retrying...')
|
||||||
|
remaining_documents = failed_documents
|
||||||
|
except Exception as e:
|
||||||
|
logging.error(f'Failed to index documents! ({e})')
|
||||||
|
time.sleep(30) # Should we add a config option for this?
|
||||||
|
|
||||||
|
|
||||||
|
def process_file(self, file_path: str, batch_size: int, ingest_function: callable):
|
||||||
|
'''
|
||||||
|
Read and index records in batches to Elasticsearch.
|
||||||
|
|
||||||
:param file_path: Path to the file
|
:param file_path: Path to the file
|
||||||
:param index_name: Name of the index
|
:param batch_size: Number of records to index per batch
|
||||||
:param data_generator: Generator for the records to index
|
:param ingest_function: Function to process the file
|
||||||
'''
|
'''
|
||||||
|
|
||||||
count = 0
|
count = 0
|
||||||
total = 0
|
records = []
|
||||||
|
|
||||||
async for ok, result in async_streaming_bulk(self.es, actions=data_generator(file_path), chunk_size=self.chunk_size, max_chunk_bytes=self.chunk_max):
|
|
||||||
action, result = result.popitem()
|
|
||||||
|
|
||||||
if not ok:
|
for processed in ingest_function(file_path):
|
||||||
logging.error(f'Failed to index document ({result["_id"]}) to {self.es_index} from {file_path} ({result})')
|
|
||||||
input('Press Enter to continue...') # Debugging (will possibly remove this since we have retries enabled)
|
if not processed:
|
||||||
|
break
|
||||||
|
|
||||||
|
if self.dry_run:
|
||||||
|
print(processed)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
struct = {'_index': self.es_index, '_source': processed}
|
||||||
|
records.append(struct)
|
||||||
count += 1
|
count += 1
|
||||||
total += 1
|
|
||||||
|
if len(records) >= batch_size:
|
||||||
|
self.bulk_index(records, file_path, count)
|
||||||
|
records = []
|
||||||
|
|
||||||
if count == self.chunk_size:
|
if records:
|
||||||
logging.info(f'Successfully indexed {self.chunk_size:,} ({total:,} processed) records to {self.es_index} from {file_path}')
|
self.bulk_index(records, file_path, count)
|
||||||
count = 0
|
|
||||||
|
|
||||||
logging.info(f'Finished indexing {total:,} records to {self.es_index} from {file_path}')
|
|
||||||
|
|
||||||
|
|
||||||
async def main():
|
def main():
|
||||||
'''Main function when running this script directly.'''
|
'''Main function when running this script directly.'''
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description='Index data into Elasticsearch.')
|
parser = argparse.ArgumentParser(description='Index data into Elasticsearch.')
|
||||||
|
|
||||||
# General arguments
|
# General arguments
|
||||||
parser.add_argument('input_path', help='Path to the input file or directory') # Required
|
parser.add_argument('input_path', help='Path to the input file or directory') # Required
|
||||||
|
parser.add_argument('--dry-run', action='store_true', help='Dry run (do not index records to Elasticsearch)')
|
||||||
parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing')
|
parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing')
|
||||||
|
|
||||||
# Elasticsearch arguments
|
# Elasticsearch arguments
|
||||||
parser.add_argument('--host', default='http://localhost/', help='Elasticsearch host')
|
parser.add_argument('--host', default='localhost', help='Elasticsearch host')
|
||||||
parser.add_argument('--port', type=int, default=9200, help='Elasticsearch port')
|
parser.add_argument('--port', type=int, default=9200, help='Elasticsearch port')
|
||||||
parser.add_argument('--user', default='elastic', help='Elasticsearch username')
|
parser.add_argument('--user', default='elastic', help='Elasticsearch username')
|
||||||
parser.add_argument('--password', default=os.getenv('ES_PASSWORD'), help='Elasticsearch password (if not provided, check environment variable ES_PASSWORD)')
|
parser.add_argument('--password', default=os.getenv('ES_PASSWORD'), help='Elasticsearch password (if not provided, check environment variable ES_PASSWORD)')
|
||||||
@ -168,16 +206,16 @@ async def main():
|
|||||||
parser.add_argument('--index', help='Elasticsearch index name')
|
parser.add_argument('--index', help='Elasticsearch index name')
|
||||||
parser.add_argument('--pipeline', help='Use an ingest pipeline for the index')
|
parser.add_argument('--pipeline', help='Use an ingest pipeline for the index')
|
||||||
parser.add_argument('--replicas', type=int, default=1, help='Number of replicas for the index')
|
parser.add_argument('--replicas', type=int, default=1, help='Number of replicas for the index')
|
||||||
parser.add_argument('--shards', type=int, default=1, help='Number of shards for the index')
|
parser.add_argument('--shards', type=int, default=3, help='Number of shards for the index')
|
||||||
|
|
||||||
# Performance arguments
|
# Performance arguments
|
||||||
|
parser.add_argument('--chunk-max', type=int, default=10, help='Maximum size in MB of a chunk')
|
||||||
parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk')
|
parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk')
|
||||||
parser.add_argument('--chunk-max', type=int, default=100, help='Maximum size of a chunk in bytes')
|
parser.add_argument('--chunk-threads', type=int, default=3, help='Number of threads to use when indexing in chunks')
|
||||||
parser.add_argument('--retries', type=int, default=100, help='Number of times to retry indexing a chunk before failing')
|
parser.add_argument('--retries', type=int, default=60, help='Number of times to retry indexing a chunk before failing')
|
||||||
parser.add_argument('--timeout', type=int, default=60, help='Number of seconds to wait before retrying a chunk')
|
parser.add_argument('--timeout', type=int, default=30, help='Number of seconds to wait before retrying a chunk')
|
||||||
|
|
||||||
# Ingestion arguments
|
# Ingestion arguments
|
||||||
parser.add_argument('--cert', action='store_true', help='Index Certstream records')
|
|
||||||
parser.add_argument('--httpx', action='store_true', help='Index Httpx records')
|
parser.add_argument('--httpx', action='store_true', help='Index Httpx records')
|
||||||
parser.add_argument('--masscan', action='store_true', help='Index Masscan records')
|
parser.add_argument('--masscan', action='store_true', help='Index Masscan records')
|
||||||
parser.add_argument('--massdns', action='store_true', help='Index Massdns records')
|
parser.add_argument('--massdns', action='store_true', help='Index Massdns records')
|
||||||
@ -194,10 +232,7 @@ async def main():
|
|||||||
raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory')
|
raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory')
|
||||||
|
|
||||||
edx = ElasticIndexer(args)
|
edx = ElasticIndexer(args)
|
||||||
await edx.initialize() # Initialize the Elasticsearch client asyncronously
|
|
||||||
|
|
||||||
if args.cert:
|
|
||||||
from ingestors import ingest_certs as ingestor
|
|
||||||
if args.httpx:
|
if args.httpx:
|
||||||
from ingestors import ingest_httpx as ingestor
|
from ingestors import ingest_httpx as ingestor
|
||||||
elif args.masscan:
|
elif args.masscan:
|
||||||
@ -206,28 +241,32 @@ async def main():
|
|||||||
from ingestors import ingest_massdns as ingestor
|
from ingestors import ingest_massdns as ingestor
|
||||||
elif args.zone:
|
elif args.zone:
|
||||||
from ingestors import ingest_zone as ingestor
|
from ingestors import ingest_zone as ingestor
|
||||||
|
|
||||||
|
batch_size = 0
|
||||||
|
|
||||||
health = await edx.get_cluster_health()
|
if not args.dry_run:
|
||||||
print(health)
|
print(edx.get_cluster_health())
|
||||||
|
|
||||||
await asyncio.sleep(5) # Delay to allow time for sniffing to complete
|
time.sleep(3) # Delay to allow time for sniffing to complete
|
||||||
|
|
||||||
nodes = await edx.get_cluster_size()
|
nodes = edx.get_cluster_size()
|
||||||
logging.info(f'Connected to {nodes:,} Elasticsearch node(s)')
|
logging.info(f'Connected to {nodes:,} Elasticsearch node(s)')
|
||||||
|
|
||||||
if not edx.es_index:
|
if not edx.es_index:
|
||||||
edx.es_index = ingestor.default_index
|
edx.es_index = ingestor.default_index
|
||||||
|
|
||||||
map_body = ingestor.construct_map()
|
map_body = ingestor.construct_map()
|
||||||
await edx.create_index(map_body, args.pipeline, args.replicas, args.shards)
|
edx.create_index(map_body, args.pipeline, args.replicas, args.shards)
|
||||||
|
|
||||||
|
batch_size = int(nodes * (args.chunk_size * args.chunk_threads))
|
||||||
|
|
||||||
if os.path.isfile(args.input_path):
|
if os.path.isfile(args.input_path):
|
||||||
logging.info(f'Processing file: {args.input_path}')
|
logging.info(f'Processing file: {args.input_path}')
|
||||||
await edx.process_data(args.input_path, ingestor.process_data)
|
edx.process_file(args.input_path, batch_size, ingestor.process_file)
|
||||||
|
|
||||||
elif stat.S_ISFIFO(os.stat(args.input_path).st_mode):
|
elif stat.S_ISFIFO(os.stat(args.input_path).st_mode):
|
||||||
logging.info(f'Watching FIFO: {args.input_path}')
|
logging.info(f'Watching FIFO: {args.input_path}')
|
||||||
await edx.process_data(args.input_path, ingestor.process_data)
|
edx.process_file(args.input_path, batch_size, ingestor.process_file)
|
||||||
|
|
||||||
elif os.path.isdir(args.input_path):
|
elif os.path.isdir(args.input_path):
|
||||||
count = 1
|
count = 1
|
||||||
@ -237,7 +276,7 @@ async def main():
|
|||||||
file_path = os.path.join(args.input_path, file)
|
file_path = os.path.join(args.input_path, file)
|
||||||
if os.path.isfile(file_path):
|
if os.path.isfile(file_path):
|
||||||
logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}')
|
logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}')
|
||||||
await edx.process_data(file_path, ingestor.process_data)
|
edx.process_file(file_path, batch_size, ingestor.process_file)
|
||||||
count += 1
|
count += 1
|
||||||
else:
|
else:
|
||||||
logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}')
|
logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}')
|
||||||
@ -245,4 +284,4 @@ async def main():
|
|||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
asyncio.run(main())
|
main()
|
@ -4,11 +4,6 @@
|
|||||||
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
try:
|
|
||||||
import aiofiles
|
|
||||||
except ImportError:
|
|
||||||
raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
|
|
||||||
|
|
||||||
default_index = 'httpx-logs'
|
default_index = 'httpx-logs'
|
||||||
|
|
||||||
def construct_map() -> dict:
|
def construct_map() -> dict:
|
||||||
@ -27,15 +22,15 @@ def construct_map() -> dict:
|
|||||||
return mapping
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
async def process_data(file_path: str):
|
def process_file(file_path: str):
|
||||||
'''
|
'''
|
||||||
Read and process HTTPX records from the log file.
|
Read and process HTTPX records from the log file.
|
||||||
|
|
||||||
:param file_path: Path to the HTTPX log file
|
:param file_path: Path to the HTTPX log file
|
||||||
'''
|
'''
|
||||||
|
|
||||||
async with aiofiles.open(file_path, mode='r') as input_file:
|
with open(file_path, 'r') as file:
|
||||||
async for line in input_file:
|
for line in file:
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
|
||||||
if not line:
|
if not line:
|
||||||
@ -48,7 +43,7 @@ async def process_data(file_path: str):
|
|||||||
|
|
||||||
del record['failed'], record['knowledgebase'], record['time']
|
del record['failed'], record['knowledgebase'], record['time']
|
||||||
|
|
||||||
yield {'_index': default_index, '_source': record}
|
yield record
|
||||||
|
|
||||||
return None # EOF
|
return None # EOF
|
||||||
|
|
@ -19,11 +19,6 @@ import logging
|
|||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
|
|
||||||
try:
|
|
||||||
import aiofiles
|
|
||||||
except ImportError:
|
|
||||||
raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
|
|
||||||
|
|
||||||
default_index = 'masscan-logs'
|
default_index = 'masscan-logs'
|
||||||
|
|
||||||
def construct_map() -> dict:
|
def construct_map() -> dict:
|
||||||
@ -59,15 +54,15 @@ def construct_map() -> dict:
|
|||||||
return mapping
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
async def process_data(file_path: str):
|
def process_file(file_path: str):
|
||||||
'''
|
'''
|
||||||
Read and process Masscan records from the log file.
|
Read and process Masscan records from the log file.
|
||||||
|
|
||||||
:param file_path: Path to the Masscan log file
|
:param file_path: Path to the Masscan log file
|
||||||
'''
|
'''
|
||||||
|
|
||||||
async with aiofiles.open(file_path, mode='r') as input_file:
|
with open(file_path, 'r') as file:
|
||||||
async for line in input_file:
|
for line in file:
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
|
||||||
if not line or not line.startswith('{'):
|
if not line or not line.startswith('{'):
|
||||||
@ -79,29 +74,22 @@ async def process_data(file_path: str):
|
|||||||
try:
|
try:
|
||||||
record = json.loads(line)
|
record = json.loads(line)
|
||||||
except json.decoder.JSONDecodeError:
|
except json.decoder.JSONDecodeError:
|
||||||
# In rare cases, the JSON record may be incomplete or malformed:
|
|
||||||
# { "ip": "51.161.12.223", "timestamp": "1707628302", "ports": [ {"port": 22, "proto": "tcp", "service": {"name": "ssh", "banner":
|
|
||||||
# { "ip": "83.66.211.246", "timestamp": "1706557002"
|
|
||||||
logging.error(f'Failed to parse JSON record! ({line})')
|
logging.error(f'Failed to parse JSON record! ({line})')
|
||||||
input('Press Enter to continue...') # Pause for review & debugging (Will remove pausing in production, still investigating the cause of this issue.)
|
input('Press Enter to continue...') # Debugging
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if len(record['ports']) > 1:
|
|
||||||
logging.warning(f'Multiple ports found for record! ({record})')
|
|
||||||
input('Press Enter to continue...') # Pause for review (Will remove pausing in production, still investigating if you ever seen more than one port in a record.)
|
|
||||||
|
|
||||||
for port_info in record['ports']:
|
for port_info in record['ports']:
|
||||||
struct = {
|
struct = {
|
||||||
'ip' : record['ip'],
|
'ip': record['ip'],
|
||||||
'port' : port_info['port'],
|
'port': port_info['port'],
|
||||||
'proto' : port_info['proto'],
|
'proto': port_info['proto'],
|
||||||
'seen' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))),
|
'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))),
|
||||||
}
|
}
|
||||||
|
|
||||||
if 'service' in port_info:
|
if 'service' in port_info:
|
||||||
if 'name' in port_info['service']:
|
if 'name' in port_info['service']:
|
||||||
if (service_name := port_info['service']['name']) not in ('unknown',''):
|
if port_info['service']['name'] != 'unknown':
|
||||||
struct['service'] = service_name
|
struct['service'] = port_info['service']['name']
|
||||||
|
|
||||||
if 'banner' in port_info['service']:
|
if 'banner' in port_info['service']:
|
||||||
banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace
|
banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace
|
||||||
@ -112,7 +100,7 @@ async def process_data(file_path: str):
|
|||||||
else:
|
else:
|
||||||
struct['banner'] = banner
|
struct['banner'] = banner
|
||||||
|
|
||||||
yield {'_index': default_index, '_source': struct}
|
yield struct
|
||||||
|
|
||||||
return None # EOF
|
return None # EOF
|
||||||
|
|
||||||
@ -143,6 +131,6 @@ Will be indexed as:
|
|||||||
"service": "ssh",
|
"service": "ssh",
|
||||||
"banner": "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4",
|
"banner": "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4",
|
||||||
"seen": "2021-10-08T02:04:28Z",
|
"seen": "2021-10-08T02:04:28Z",
|
||||||
"ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload, Might be useful..
|
"ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload (Do we need this?)
|
||||||
}
|
}
|
||||||
'''
|
'''
|
@ -4,11 +4,6 @@
|
|||||||
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
try:
|
|
||||||
import aiofiles
|
|
||||||
except ImportError:
|
|
||||||
raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
|
|
||||||
|
|
||||||
default_index = 'ptr-records'
|
default_index = 'ptr-records'
|
||||||
|
|
||||||
def construct_map() -> dict:
|
def construct_map() -> dict:
|
||||||
@ -30,15 +25,15 @@ def construct_map() -> dict:
|
|||||||
return mapping
|
return mapping
|
||||||
|
|
||||||
|
|
||||||
async def process_data(file_path: str):
|
def process_file(file_path: str):
|
||||||
'''
|
'''
|
||||||
Read and process Massdns records from the log file.
|
Read and process Massdns records from the log file.
|
||||||
|
|
||||||
:param file_path: Path to the Massdns log file
|
:param file_path: Path to the Massdns log file
|
||||||
'''
|
'''
|
||||||
|
|
||||||
async with aiofiles.open(file_path, mode='r') as input_file:
|
with open(file_path, 'r') as file:
|
||||||
async for line in input_file:
|
for line in file:
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
|
||||||
if not line:
|
if not line:
|
||||||
@ -70,7 +65,7 @@ async def process_data(file_path: str):
|
|||||||
'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
|
'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
|
||||||
}
|
}
|
||||||
|
|
||||||
yield {'_index': default_index, '_source': struct}
|
yield struct
|
||||||
|
|
||||||
return None # EOF
|
return None # EOF
|
||||||
|
|
@@ -4,11 +4,6 @@

 import time

-try:
-    import aiofiles
-except ImportError:
-    raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')
-
 default_index = 'dns-zones'
 record_types = ('a','aaaa','caa','cdnskey','cds','cname','dnskey','ds','mx','naptr','ns','nsec','nsec3','nsec3param','ptr','rrsig','rp','sshfp','soa','srv','txt','type65534')

@@ -47,7 +42,7 @@ def construct_map() -> dict:
     return mapping


-async def process_data(file_path: str):
+def process_file(file_path: str):
     '''
     Read and process zone file records.

@@ -57,8 +52,8 @@ async def process_data(file_path: str):
     domain_records = {}
     last_domain = None

-    async with aiofiles.open(file_path, mode='r') as input_file:
-        async for line in input_file:
+    with open(file_path, 'r') as file:
+        for line in file:
             line = line.strip()

             if not line or line.startswith(';'):
@@ -93,11 +88,11 @@ async def process_data(file_path: str):

             if domain != last_domain:
                 if last_domain:
-                    struct = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}
+                    source = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}

                     del domain_records[last_domain]

-                    yield {'_index': default_index, '_source': struct}
+                    yield source

                 last_domain = domain

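The zone ingestor buffers records per domain and only yields a document once the parser moves on to a new domain. A rough self-contained sketch of that flush-on-change pattern (the function name and input format are hypothetical):

import time

def group_records(parsed_lines):
    '''parsed_lines yields (domain, record) tuples in zone-file order, so records for one domain are adjacent.'''

    domain_records = {}
    last_domain = None

    for domain, record in parsed_lines:
        if domain != last_domain:
            if last_domain: # a new domain started, flush the previous one
                yield {'domain': last_domain, 'records': domain_records.pop(last_domain), 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}
            last_domain = domain

        domain_records.setdefault(domain, []).append(record)

    if last_domain: # flush whatever is left after the final line
        yield {'domain': last_domain, 'records': domain_records.pop(last_domain), 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())}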
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
-# sniff_patch.py [asyncronous developement]
+# sniff_patch.py

 # Note:
 # This is a patch for the elasticsearch 8.x client to fix the sniff_* options.
@@ -12,23 +12,23 @@

 import base64

-import elasticsearch._async.client as async_client
+import elasticsearch._sync.client as client
 from elasticsearch.exceptions import SerializationError, ConnectionError


-async def init_elasticsearch_async(*args, **kwargs):
+def init_elasticsearch(*args, **kwargs):
     '''
-    Initialize the Async Elasticsearch client with the sniff patch.
+    Initialize the Elasticsearch client with the sniff patch.

-    :param args: Async Elasticsearch positional arguments.
-    :param kwargs: Async Elasticsearch keyword arguments.
+    :param args: Elasticsearch positional arguments.
+    :param kwargs: Elasticsearch keyword arguments.
     '''
-    async_client.default_sniff_callback = _override_async_sniff_callback(kwargs['basic_auth'])
+    client.default_sniff_callback = _override_sniff_callback(kwargs['basic_auth'])

-    return async_client.AsyncElasticsearch(*args, **kwargs)
+    return client.Elasticsearch(*args, **kwargs)


-def _override_async_sniff_callback(basic_auth):
+def _override_sniff_callback(basic_auth):
     '''
     Taken from https://github.com/elastic/elasticsearch-py/blob/8.8/elasticsearch/_sync/client/_base.py#L166
     Completely unmodified except for adding the auth header to the elastic request.
@@ -38,19 +38,19 @@ def _override_async_sniff_callback(basic_auth):
     - https://github.com/elastic/elasticsearch-py/issues/2005
     '''
     auth_str = base64.b64encode(':'.join(basic_auth).encode()).decode()
-    sniffed_node_callback = async_client._base._default_sniffed_node_callback
+    sniffed_node_callback = client._base._default_sniffed_node_callback

-    async def modified_async_sniff_callback(transport, sniff_options):
+    def modified_sniff_callback(transport, sniff_options):
         for _ in transport.node_pool.all():
             try:
-                meta, node_infos = await transport.perform_request(
+                meta, node_infos = transport.perform_request(
                     'GET',
                     '/_nodes/_all/http',
-                    headers={
+                    headers = {
                         'accept': 'application/vnd.elasticsearch+json; compatible-with=8',
                         'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s
                     },
-                    request_timeout=(
+                    request_timeout = (
                         sniff_options.sniff_timeout
                         if not sniff_options.is_initial_sniff
                         else None
@@ -79,7 +79,7 @@ def _override_async_sniff_callback(basic_auth):
                     port = int(port_str)

                     assert sniffed_node_callback is not None
-                    sniffed_node = await sniffed_node_callback(
+                    sniffed_node = sniffed_node_callback(
                         node_info, meta.node.replace(host=host, port=port)
                     )
                     if sniffed_node is None:
@@ -93,4 +93,4 @@ def _override_async_sniff_callback(basic_auth):

         return []

-    return modified_async_sniff_callback
+    return modified_sniff_callback
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
-# sniff_patch.py
+# sniff_patch.py [asyncronous developement]

 # Note:
 # This is a patch for the elasticsearch 8.x client to fix the sniff_* options.
@@ -12,23 +12,23 @@

 import base64

-import elasticsearch._sync.client as client
+import elasticsearch._async.client as async_client
 from elasticsearch.exceptions import SerializationError, ConnectionError


-def init_elasticsearch(*args, **kwargs):
+async def init_elasticsearch_async(*args, **kwargs):
     '''
-    Initialize the Elasticsearch client with the sniff patch.
+    Initialize the Async Elasticsearch client with the sniff patch.

-    :param args: Elasticsearch positional arguments.
-    :param kwargs: Elasticsearch keyword arguments.
+    :param args: Async Elasticsearch positional arguments.
+    :param kwargs: Async Elasticsearch keyword arguments.
     '''
-    client.default_sniff_callback = _override_sniff_callback(kwargs['basic_auth'])
+    async_client.default_sniff_callback = _override_async_sniff_callback(kwargs['basic_auth'])

-    return client.Elasticsearch(*args, **kwargs)
+    return async_client.AsyncElasticsearch(*args, **kwargs)


-def _override_sniff_callback(basic_auth):
+def _override_async_sniff_callback(basic_auth):
     '''
     Taken from https://github.com/elastic/elasticsearch-py/blob/8.8/elasticsearch/_sync/client/_base.py#L166
     Completely unmodified except for adding the auth header to the elastic request.
@@ -38,19 +38,19 @@ def _override_sniff_callback(basic_auth):
     - https://github.com/elastic/elasticsearch-py/issues/2005
     '''
     auth_str = base64.b64encode(':'.join(basic_auth).encode()).decode()
-    sniffed_node_callback = client._base._default_sniffed_node_callback
+    sniffed_node_callback = async_client._base._default_sniffed_node_callback

-    def modified_sniff_callback(transport, sniff_options):
+    async def modified_async_sniff_callback(transport, sniff_options):
         for _ in transport.node_pool.all():
             try:
-                meta, node_infos = transport.perform_request(
+                meta, node_infos = await transport.perform_request(
                     'GET',
                     '/_nodes/_all/http',
-                    headers = {
+                    headers={
                         'accept': 'application/vnd.elasticsearch+json; compatible-with=8',
                         'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s
                     },
-                    request_timeout = (
+                    request_timeout=(
                         sniff_options.sniff_timeout
                         if not sniff_options.is_initial_sniff
                         else None
@@ -79,7 +79,7 @@ def _override_sniff_callback(basic_auth):
                     port = int(port_str)

                     assert sniffed_node_callback is not None
-                    sniffed_node = sniffed_node_callback(
+                    sniffed_node = await sniffed_node_callback(
                         node_info, meta.node.replace(host=host, port=port)
                     )
                     if sniffed_node is None:
@@ -93,4 +93,4 @@ def _override_sniff_callback(basic_auth):

         return []

-    return modified_sniff_callback
+    return modified_async_sniff_callback
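For reference, a hypothetical usage sketch of the async variant of the patch above; the host, credentials and sniff settings are placeholders, and basic_auth must be passed because the patched callback reuses it for the auth header.

import asyncio

import sniff_patch

async def main():
    es = await sniff_patch.init_elasticsearch_async(
        hosts=['https://localhost:9200'],   # placeholder node
        basic_auth=('elastic', 'changeme'), # reused by the patched sniff callback
        sniff_on_start=True,
        sniff_on_node_failure=True
    )
    print(await es.info())
    await es.close()

asyncio.run(main())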