diff --git a/eris.py b/eris.py index fb3330b..a3d37cf 100644 --- a/eris.py +++ b/eris.py @@ -1,18 +1,21 @@ #!/usr/bin/env python # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris) +# eris.py +import asyncio import argparse import logging import os import stat -import time import sys sys.dont_write_bytecode = True try: - from elasticsearch import Elasticsearch, helpers + # This is commented out because there is a bug with the elasticsearch library that requires a patch (see initialize() method below) + #from elasticsearch import AsyncElasticsearch from elasticsearch.exceptions import NotFoundError + from elasticsearch.helpers import async_streaming_bulk except ImportError: raise ImportError('Missing required \'elasticsearch\' library. (pip install elasticsearch)') @@ -28,48 +31,51 @@ class ElasticIndexer: :param args: Parsed arguments from argparse ''' - self.chunk_max = args.chunk_max * 1024 * 1024 # MB + self.chunk_max = args.chunk_max * 1024 * 1024 # MB self.chunk_size = args.chunk_size - self.chunk_threads = args.chunk_threads - self.dry_run = args.dry_run + self.es = None self.es_index = args.index - if not args.dry_run: - es_config = { - 'hosts': [f'{args.host}:{args.port}'], - 'verify_certs': args.self_signed, - 'ssl_show_warn': args.self_signed, - 'request_timeout': args.timeout, - 'max_retries': args.retries, - 'retry_on_timeout': True, - 'sniff_on_start': False, - 'sniff_on_node_failure': True, - 'min_delay_between_sniffing': 60 # Add config option for this? - } + self.es_config = { + 'hosts': [f'{args.host}:{args.port}'], + 'verify_certs': args.self_signed, + 'ssl_show_warn': args.self_signed, + 'request_timeout': args.timeout, + 'max_retries': args.retries, + 'retry_on_timeout': True, + 'sniff_on_start': True, # Is this problematic? + 'sniff_on_node_failure': True, + 'min_delay_between_sniffing': 60 # Add config option for this? + } - if args.api_key: - es_config['headers'] = {'Authorization': f'ApiKey {args.api_key}'} - else: - es_config['basic_auth'] = (args.user, args.password) - - # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960) - import sniff_patch - self.es = sniff_patch.init_elasticsearch(**es_config) + if args.api_key: + self.es_config['api_key'] = (args.api_key, '') # Verify this is correct + else: + self.es_config['basic_auth'] = (args.user, args.password) + + + async def initialize(self): + '''Initialize the Elasticsearch client.''' - # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client: - #self.es = Elasticsearch(**es_config) + # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960) + import sniff_patch + self.es = sniff_patch.init_elasticsearch(**self.es_config) + + # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client: + #self.es = AsyncElasticsearch(**es_config) - def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1, ): + async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1): ''' Create the Elasticsearch index with the defined mapping. 
- :param pipline: Name of the ingest pipeline to use for the index + :param map_body: Mapping for the index + :param pipeline: Name of the ingest pipeline to use for the index :param replicas: Number of replicas for the index :param shards: Number of shards for the index ''' - if self.es.indices.exists(index=self.es_index): + if await self.es.indices.exists(index=self.es_index): logging.info(f'Index \'{self.es_index}\' already exists.') return @@ -82,13 +88,13 @@ class ElasticIndexer: if pipeline: try: - self.es.ingest.get_pipeline(id=pipeline) + await self.es.ingest.get_pipeline(id=pipeline) logging.info(f'Using ingest pipeline \'{pipeline}\' for index \'{self.es_index}\'') mapping['settings']['index.default_pipeline'] = pipeline except NotFoundError: raise ValueError(f'Ingest pipeline \'{pipeline}\' does not exist.') - response = self.es.indices.create(index=self.es_index, body=mapping) + response = await self.es.indices.create(index=self.es_index, body=mapping) if response.get('acknowledged') and response.get('shards_acknowledged'): logging.info(f'Index \'{self.es_index}\' successfully created.') @@ -96,106 +102,62 @@ class ElasticIndexer: raise Exception(f'Failed to create index. ({response})') - def get_cluster_health(self) -> dict: + async def get_cluster_health(self) -> dict: '''Get the health of the Elasticsearch cluster.''' - return self.es.cluster.health() + return await self.es.cluster.health() - def get_cluster_size(self) -> int: + async def get_cluster_size(self) -> int: '''Get the number of nodes in the Elasticsearch cluster.''' - cluster_stats = self.es.cluster.stats() + cluster_stats = await self.es.cluster.stats() number_of_nodes = cluster_stats['nodes']['count']['total'] return number_of_nodes - def bulk_index(self, documents: list, file_path: str, count: int): + async def process_data(self, file_path: str, data_generator: callable): ''' - Index a batch of documents to Elasticsearch. - - :param documents: List of documents to index - :param file_path: Path to the file being indexed - :param count: Total number of records processed - ''' - - remaining_documents = documents - - parallel_bulk_config = { - 'client': self.es, - 'chunk_size': self.chunk_size, - 'max_chunk_bytes': self.chunk_max, - 'thread_count': self.chunk_threads, - 'queue_size': 2 # Add config option for this? - } - - while remaining_documents: - failed_documents = [] - - try: - for success, response in helpers.parallel_bulk(actions=remaining_documents, **parallel_bulk_config): - if not success: - failed_documents.append(response) - - if not failed_documents: - ingested = parallel_bulk_config['chunk_size'] * parallel_bulk_config['thread_count'] - logging.info(f'Successfully indexed {ingested:,} ({count:,} processed) records to {self.es_index} from {file_path}') - break - - else: - logging.warning(f'Failed to index {len(failed_documents):,} failed documents! Retrying...') - remaining_documents = failed_documents - except Exception as e: - logging.error(f'Failed to index documents! ({e})') - time.sleep(30) # Should we add a config option for this? - - - def process_file(self, file_path: str, batch_size: int, ingest_function: callable): - ''' - Read and index records in batches to Elasticsearch. + Index records in chunks to Elasticsearch. 
:param file_path: Path to the file - :param batch_size: Number of records to index per batch - :param ingest_function: Function to process the file + :param index_name: Name of the index + :param data_generator: Generator for the records to index ''' count = 0 - records = [] + total = 0 + + async for ok, result in async_streaming_bulk(self.es, actions=data_generator(file_path), chunk_size=self.chunk_size, max_chunk_bytes=self.chunk_max): + action, result = result.popitem() - for processed in ingest_function(file_path): - - if not processed: - break - - if self.dry_run: - print(processed) + if not ok: + logging.error(f'Failed to index document ({result["_id"]}) to {self.es_index} from {file_path} ({result})') + input('Press Enter to continue...') # Debugging (will possibly remove this since we have retries enabled) continue - struct = {'_index': self.es_index, '_source': processed} - records.append(struct) count += 1 - - if len(records) >= batch_size: - self.bulk_index(records, file_path, count) - records = [] + total += 1 - if records: - self.bulk_index(records, file_path, count) + if count == self.chunk_size: + logging.info(f'Successfully indexed {self.chunk_size:,} ({total:,} processed) records to {self.es_index} from {file_path}') + count = 0 + + logging.info(f'Finished indexing {total:,} records to {self.es_index} from {file_path}') -def main(): +async def main(): '''Main function when running this script directly.''' parser = argparse.ArgumentParser(description='Index data into Elasticsearch.') # General arguments parser.add_argument('input_path', help='Path to the input file or directory') # Required - parser.add_argument('--dry-run', action='store_true', help='Dry run (do not index records to Elasticsearch)') parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing') # Elasticsearch arguments - parser.add_argument('--host', default='localhost', help='Elasticsearch host') + parser.add_argument('--host', default='http://localhost', help='Elasticsearch host') parser.add_argument('--port', type=int, default=9200, help='Elasticsearch port') parser.add_argument('--user', default='elastic', help='Elasticsearch username') parser.add_argument('--password', default=os.getenv('ES_PASSWORD'), help='Elasticsearch password (if not provided, check environment variable ES_PASSWORD)') @@ -206,16 +168,16 @@ def main(): parser.add_argument('--index', help='Elasticsearch index name') parser.add_argument('--pipeline', help='Use an ingest pipeline for the index') parser.add_argument('--replicas', type=int, default=1, help='Number of replicas for the index') - parser.add_argument('--shards', type=int, default=3, help='Number of shards for the index') + parser.add_argument('--shards', type=int, default=1, help='Number of shards for the index') # Performance arguments - parser.add_argument('--chunk-max', type=int, default=10, help='Maximum size in MB of a chunk') parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk') - parser.add_argument('--chunk-threads', type=int, default=3, help='Number of threads to use when indexing in chunks') - parser.add_argument('--retries', type=int, default=60, help='Number of times to retry indexing a chunk before failing') - parser.add_argument('--timeout', type=int, default=30, help='Number of seconds to wait before retrying a chunk') + parser.add_argument('--chunk-max', type=int, default=100, help='Maximum size in MB of a chunk') + parser.add_argument('--retries', type=int,
default=100, help='Number of times to retry indexing a chunk before failing') + parser.add_argument('--timeout', type=int, default=60, help='Number of seconds to wait before retrying a chunk') # Ingestion arguments + parser.add_argument('--cert', action='store_true', help='Index Certstream records') parser.add_argument('--httpx', action='store_true', help='Index Httpx records') parser.add_argument('--masscan', action='store_true', help='Index Masscan records') parser.add_argument('--massdns', action='store_true', help='Index Massdns records') @@ -232,7 +194,10 @@ def main(): raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory') edx = ElasticIndexer(args) + await edx.initialize() # Initialize the Elasticsearch client asynchronously + if args.cert: + from ingestors import ingest_certs as ingestor if args.httpx: from ingestors import ingest_httpx as ingestor elif args.masscan: @@ -241,32 +206,28 @@ def main(): from ingestors import ingest_massdns as ingestor elif args.zone: from ingestors import ingest_zone as ingestor - - batch_size = 0 - if not args.dry_run: - print(edx.get_cluster_health()) + health = await edx.get_cluster_health() + print(health) - time.sleep(3) # Delay to allow time for sniffing to complete - - nodes = edx.get_cluster_size() - logging.info(f'Connected to {nodes:,} Elasticsearch node(s)') + await asyncio.sleep(5) # Delay to allow time for sniffing to complete + + nodes = await edx.get_cluster_size() + logging.info(f'Connected to {nodes:,} Elasticsearch node(s)') - if not edx.es_index: - edx.es_index = ingestor.default_index + if not edx.es_index: + edx.es_index = ingestor.default_index - map_body = ingestor.construct_map() - edx.create_index(map_body, args.pipeline, args.replicas, args.shards) - - batch_size = int(nodes * (args.chunk_size * args.chunk_threads)) + map_body = ingestor.construct_map() + await edx.create_index(map_body, args.pipeline, args.replicas, args.shards) if os.path.isfile(args.input_path): logging.info(f'Processing file: {args.input_path}') - edx.process_file(args.input_path, batch_size, ingestor.process_file) + await edx.process_data(args.input_path, ingestor.process_data) elif stat.S_ISFIFO(os.stat(args.input_path).st_mode): logging.info(f'Watching FIFO: {args.input_path}') - edx.process_file(args.input_path, batch_size, ingestor.process_file) + await edx.process_data(args.input_path, ingestor.process_data) elif os.path.isdir(args.input_path): count = 1 @@ -276,7 +237,7 @@ def main(): file_path = os.path.join(args.input_path, file) if os.path.isfile(file_path): logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}') - edx.process_file(file_path, batch_size, ingestor.process_file) + await edx.process_data(file_path, ingestor.process_data) count += 1 else: logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}') @@ -284,4 +245,4 @@ def main(): if __name__ == '__main__': - main() \ No newline at end of file + asyncio.run(main()) \ No newline at end of file diff --git a/async_dev/ingestors/ingest_certs.py b/ingestors/ingest_certs.py similarity index 100% rename from async_dev/ingestors/ingest_certs.py rename to ingestors/ingest_certs.py diff --git a/ingestors/ingest_httpx.py b/ingestors/ingest_httpx.py index 4ea14bc..93d8b58 100644 --- a/ingestors/ingest_httpx.py +++ b/ingestors/ingest_httpx.py @@ -4,6 +4,11 @@ import json +try: + import aiofiles +except ImportError: + raise ImportError('Missing required \'aiofiles\' library.
(pip install aiofiles)') + default_index = 'httpx-logs' def construct_map() -> dict: @@ -22,15 +27,15 @@ def construct_map() -> dict: return mapping -def process_file(file_path: str): +async def process_data(file_path: str): ''' Read and process HTTPX records from the log file. :param file_path: Path to the HTTPX log file ''' - with open(file_path, 'r') as file: - for line in file: + async with aiofiles.open(file_path, mode='r') as input_file: + async for line in input_file: line = line.strip() if not line: @@ -43,7 +48,7 @@ def process_file(file_path: str): del record['failed'], record['knowledgebase'], record['time'] - yield record + yield {'_index': default_index, '_source': record} return None # EOF diff --git a/ingestors/ingest_masscan.py b/ingestors/ingest_masscan.py index 96f1802..5eb0660 100644 --- a/ingestors/ingest_masscan.py +++ b/ingestors/ingest_masscan.py @@ -19,6 +19,11 @@ import logging import re import time +try: + import aiofiles +except ImportError: + raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') + default_index = 'masscan-logs' def construct_map() -> dict: @@ -54,15 +59,15 @@ def construct_map() -> dict: return mapping -def process_file(file_path: str): +async def process_data(file_path: str): ''' Read and process Masscan records from the log file. :param file_path: Path to the Masscan log file ''' - with open(file_path, 'r') as file: - for line in file: + async with aiofiles.open(file_path, mode='r') as input_file: + async for line in input_file: line = line.strip() if not line or not line.startswith('{'): @@ -74,22 +79,29 @@ def process_file(file_path: str): try: record = json.loads(line) except json.decoder.JSONDecodeError: + # In rare cases, the JSON record may be incomplete or malformed: + # { "ip": "51.161.12.223", "timestamp": "1707628302", "ports": [ {"port": 22, "proto": "tcp", "service": {"name": "ssh", "banner": + # { "ip": "83.66.211.246", "timestamp": "1706557002" logging.error(f'Failed to parse JSON record! ({line})') - input('Press Enter to continue...') # Debugging + input('Press Enter to continue...') # Pause for review & debugging (Will remove pausing in production, still investigating the cause of this issue.) continue + if len(record['ports']) > 1: + logging.warning(f'Multiple ports found for record! ({record})') + input('Press Enter to continue...') # Pause for review (Will remove pausing in production, still investigating whether a record ever contains more than one port.)
+ for port_info in record['ports']: struct = { - 'ip': record['ip'], - 'port': port_info['port'], - 'proto': port_info['proto'], - 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))), + 'ip' : record['ip'], + 'port' : port_info['port'], + 'proto' : port_info['proto'], + 'seen' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))), } if 'service' in port_info: if 'name' in port_info['service']: - if port_info['service']['name'] != 'unknown': - struct['service'] = port_info['service']['name'] + if (service_name := port_info['service']['name']) not in ('unknown',''): + struct['service'] = service_name if 'banner' in port_info['service']: banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace @@ -100,7 +112,7 @@ def process_file(file_path: str): else: struct['banner'] = banner - yield struct + yield {'_index': default_index, '_source': struct} return None # EOF @@ -131,6 +143,6 @@ Will be indexed as: "service": "ssh", "banner": "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4", "seen": "2021-10-08T02:04:28Z", - "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload (Do we need this?) + "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload, Might be useful.. } ''' \ No newline at end of file diff --git a/ingestors/ingest_massdns.py b/ingestors/ingest_massdns.py index fd8f3b7..5c8e121 100644 --- a/ingestors/ingest_massdns.py +++ b/ingestors/ingest_massdns.py @@ -4,6 +4,11 @@ import time +try: + import aiofiles +except ImportError: + raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') + default_index = 'ptr-records' def construct_map() -> dict: @@ -25,15 +30,15 @@ def construct_map() -> dict: return mapping -def process_file(file_path: str): +async def process_data(file_path: str): ''' Read and process Massdns records from the log file. :param file_path: Path to the Massdns log file ''' - with open(file_path, 'r') as file: - for line in file: + async with aiofiles.open(file_path, mode='r') as input_file: + async for line in input_file: line = line.strip() if not line: @@ -65,7 +70,7 @@ def process_file(file_path: str): 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) } - yield struct + yield {'_index': default_index, '_source': struct} return None # EOF diff --git a/ingestors/ingest_zone.py b/ingestors/ingest_zone.py index 5c9d358..7cb552d 100644 --- a/ingestors/ingest_zone.py +++ b/ingestors/ingest_zone.py @@ -4,6 +4,11 @@ import time +try: + import aiofiles +except ImportError: + raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') + default_index = 'dns-zones' record_types = ('a','aaaa','caa','cdnskey','cds','cname','dnskey','ds','mx','naptr','ns','nsec','nsec3','nsec3param','ptr','rrsig','rp','sshfp','soa','srv','txt','type65534') @@ -42,7 +47,7 @@ def construct_map() -> dict: return mapping -def process_file(file_path: str): +async def process_data(file_path: str): ''' Read and process zone file records. 
@@ -52,8 +57,8 @@ def process_file(file_path: str): domain_records = {} last_domain = None - with open(file_path, 'r') as file: - for line in file: + async with aiofiles.open(file_path, mode='r') as input_file: + async for line in input_file: line = line.strip() if not line or line.startswith(';'): @@ -88,11 +93,11 @@ def process_file(file_path: str): if domain != last_domain: if last_domain: - source = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())} + struct = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())} del domain_records[last_domain] - yield source + yield {'_index': default_index, '_source': struct} last_domain = domain diff --git a/async_dev/eris.py b/old/eris.py similarity index 50% rename from async_dev/eris.py rename to old/eris.py index a3d37cf..fb3330b 100644 --- a/async_dev/eris.py +++ b/old/eris.py @@ -1,21 +1,18 @@ #!/usr/bin/env python # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris) -# eris.py -import asyncio import argparse import logging import os import stat +import time import sys sys.dont_write_bytecode = True try: - # This is commented out because there is a bug with the elasticsearch library that requires a patch (see initialize() method below) - #from elasticsearch import AsyncElasticsearch + from elasticsearch import Elasticsearch, helpers from elasticsearch.exceptions import NotFoundError - from elasticsearch.helpers import async_streaming_bulk except ImportError: raise ImportError('Missing required \'elasticsearch\' library. (pip install elasticsearch)') @@ -31,51 +28,48 @@ class ElasticIndexer: :param args: Parsed arguments from argparse ''' - self.chunk_max = args.chunk_max * 1024 * 1024 # MB + self.chunk_max = args.chunk_max * 1024 * 1024 # MB self.chunk_size = args.chunk_size - self.es = None + self.chunk_threads = args.chunk_threads + self.dry_run = args.dry_run self.es_index = args.index - self.es_config = { - 'hosts': [f'{args.host}:{args.port}'], - 'verify_certs': args.self_signed, - 'ssl_show_warn': args.self_signed, - 'request_timeout': args.timeout, - 'max_retries': args.retries, - 'retry_on_timeout': True, - 'sniff_on_start': True, # Is this problematic? - 'sniff_on_node_failure': True, - 'min_delay_between_sniffing': 60 # Add config option for this? - } + if not args.dry_run: + es_config = { + 'hosts': [f'{args.host}:{args.port}'], + 'verify_certs': args.self_signed, + 'ssl_show_warn': args.self_signed, + 'request_timeout': args.timeout, + 'max_retries': args.retries, + 'retry_on_timeout': True, + 'sniff_on_start': False, + 'sniff_on_node_failure': True, + 'min_delay_between_sniffing': 60 # Add config option for this? 
+ } - if args.api_key: - self.es_config['api_key'] = (args.api_key, '') # Verify this is correct - else: - self.es_config['basic_auth'] = (args.user, args.password) - - - async def initialize(self): - '''Initialize the Elasticsearch client.''' + if args.api_key: + es_config['headers'] = {'Authorization': f'ApiKey {args.api_key}'} + else: + es_config['basic_auth'] = (args.user, args.password) + + # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960) + import sniff_patch + self.es = sniff_patch.init_elasticsearch(**es_config) - # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960) - import sniff_patch - self.es = sniff_patch.init_elasticsearch(**self.es_config) - - # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client: - #self.es = AsyncElasticsearch(**es_config) + # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client: + #self.es = Elasticsearch(**es_config) - async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1): + def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1, ): ''' Create the Elasticsearch index with the defined mapping. - :param map_body: Mapping for the index - :param pipeline: Name of the ingest pipeline to use for the index + :param pipline: Name of the ingest pipeline to use for the index :param replicas: Number of replicas for the index :param shards: Number of shards for the index ''' - if await self.es.indices.exists(index=self.es_index): + if self.es.indices.exists(index=self.es_index): logging.info(f'Index \'{self.es_index}\' already exists.') return @@ -88,13 +82,13 @@ class ElasticIndexer: if pipeline: try: - await self.es.ingest.get_pipeline(id=pipeline) + self.es.ingest.get_pipeline(id=pipeline) logging.info(f'Using ingest pipeline \'{pipeline}\' for index \'{self.es_index}\'') mapping['settings']['index.default_pipeline'] = pipeline except NotFoundError: raise ValueError(f'Ingest pipeline \'{pipeline}\' does not exist.') - response = await self.es.indices.create(index=self.es_index, body=mapping) + response = self.es.indices.create(index=self.es_index, body=mapping) if response.get('acknowledged') and response.get('shards_acknowledged'): logging.info(f'Index \'{self.es_index}\' successfully created.') @@ -102,62 +96,106 @@ class ElasticIndexer: raise Exception(f'Failed to create index. ({response})') - async def get_cluster_health(self) -> dict: + def get_cluster_health(self) -> dict: '''Get the health of the Elasticsearch cluster.''' - return await self.es.cluster.health() + return self.es.cluster.health() - async def get_cluster_size(self) -> int: + def get_cluster_size(self) -> int: '''Get the number of nodes in the Elasticsearch cluster.''' - cluster_stats = await self.es.cluster.stats() + cluster_stats = self.es.cluster.stats() number_of_nodes = cluster_stats['nodes']['count']['total'] return number_of_nodes - async def process_data(self, file_path: str, data_generator: callable): + def bulk_index(self, documents: list, file_path: str, count: int): ''' - Index records in chunks to Elasticsearch. + Index a batch of documents to Elasticsearch. 
+ + :param documents: List of documents to index + :param file_path: Path to the file being indexed + :param count: Total number of records processed + ''' + + remaining_documents = documents + + parallel_bulk_config = { + 'client': self.es, + 'chunk_size': self.chunk_size, + 'max_chunk_bytes': self.chunk_max, + 'thread_count': self.chunk_threads, + 'queue_size': 2 # Add config option for this? + } + + while remaining_documents: + failed_documents = [] + + try: + for success, response in helpers.parallel_bulk(actions=remaining_documents, **parallel_bulk_config): + if not success: + failed_documents.append(response) + + if not failed_documents: + ingested = parallel_bulk_config['chunk_size'] * parallel_bulk_config['thread_count'] + logging.info(f'Successfully indexed {ingested:,} ({count:,} processed) records to {self.es_index} from {file_path}') + break + + else: + logging.warning(f'Failed to index {len(failed_documents):,} failed documents! Retrying...') + remaining_documents = failed_documents + except Exception as e: + logging.error(f'Failed to index documents! ({e})') + time.sleep(30) # Should we add a config option for this? + + + def process_file(self, file_path: str, batch_size: int, ingest_function: callable): + ''' + Read and index records in batches to Elasticsearch. :param file_path: Path to the file - :param index_name: Name of the index - :param data_generator: Generator for the records to index + :param batch_size: Number of records to index per batch + :param ingest_function: Function to process the file ''' count = 0 - total = 0 - - async for ok, result in async_streaming_bulk(self.es, actions=data_generator(file_path), chunk_size=self.chunk_size, max_chunk_bytes=self.chunk_max): - action, result = result.popitem() + records = [] - if not ok: - logging.error(f'Failed to index document ({result["_id"]}) to {self.es_index} from {file_path} ({result})') - input('Press Enter to continue...') # Debugging (will possibly remove this since we have retries enabled) + for processed in ingest_function(file_path): + + if not processed: + break + + if self.dry_run: + print(processed) continue + struct = {'_index': self.es_index, '_source': processed} + records.append(struct) count += 1 - total += 1 + + if len(records) >= batch_size: + self.bulk_index(records, file_path, count) + records = [] - if count == self.chunk_size: - logging.info(f'Successfully indexed {self.chunk_size:,} ({total:,} processed) records to {self.es_index} from {file_path}') - count = 0 - - logging.info(f'Finished indexing {total:,} records to {self.es_index} from {file_path}') + if records: + self.bulk_index(records, file_path, count) -async def main(): +def main(): '''Main function when running this script directly.''' parser = argparse.ArgumentParser(description='Index data into Elasticsearch.') # General arguments parser.add_argument('input_path', help='Path to the input file or directory') # Required + parser.add_argument('--dry-run', action='store_true', help='Dry run (do not index records to Elasticsearch)') parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing') # Elasticsearch arguments - parser.add_argument('--host', default='http://localhost/', help='Elasticsearch host') + parser.add_argument('--host', default='localhost', help='Elasticsearch host') parser.add_argument('--port', type=int, default=9200, help='Elasticsearch port') parser.add_argument('--user', default='elastic', help='Elasticsearch username') parser.add_argument('--password', 
default=os.getenv('ES_PASSWORD'), help='Elasticsearch password (if not provided, check environment variable ES_PASSWORD)') @@ -168,16 +206,16 @@ async def main(): parser.add_argument('--index', help='Elasticsearch index name') parser.add_argument('--pipeline', help='Use an ingest pipeline for the index') parser.add_argument('--replicas', type=int, default=1, help='Number of replicas for the index') - parser.add_argument('--shards', type=int, default=1, help='Number of shards for the index') + parser.add_argument('--shards', type=int, default=3, help='Number of shards for the index') # Performance arguments + parser.add_argument('--chunk-max', type=int, default=10, help='Maximum size in MB of a chunk') parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk') - parser.add_argument('--chunk-max', type=int, default=100, help='Maximum size of a chunk in bytes') - parser.add_argument('--retries', type=int, default=100, help='Number of times to retry indexing a chunk before failing') - parser.add_argument('--timeout', type=int, default=60, help='Number of seconds to wait before retrying a chunk') + parser.add_argument('--chunk-threads', type=int, default=3, help='Number of threads to use when indexing in chunks') + parser.add_argument('--retries', type=int, default=60, help='Number of times to retry indexing a chunk before failing') + parser.add_argument('--timeout', type=int, default=30, help='Number of seconds to wait before retrying a chunk') # Ingestion arguments - parser.add_argument('--cert', action='store_true', help='Index Certstream records') parser.add_argument('--httpx', action='store_true', help='Index Httpx records') parser.add_argument('--masscan', action='store_true', help='Index Masscan records') parser.add_argument('--massdns', action='store_true', help='Index Massdns records') @@ -194,10 +232,7 @@ async def main(): raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory') edx = ElasticIndexer(args) - await edx.initialize() # Initialize the Elasticsearch client asyncronously - if args.cert: - from ingestors import ingest_certs as ingestor if args.httpx: from ingestors import ingest_httpx as ingestor elif args.masscan: @@ -206,28 +241,32 @@ async def main(): from ingestors import ingest_massdns as ingestor elif args.zone: from ingestors import ingest_zone as ingestor + + batch_size = 0 - health = await edx.get_cluster_health() - print(health) + if not args.dry_run: + print(edx.get_cluster_health()) - await asyncio.sleep(5) # Delay to allow time for sniffing to complete - - nodes = await edx.get_cluster_size() - logging.info(f'Connected to {nodes:,} Elasticsearch node(s)') + time.sleep(3) # Delay to allow time for sniffing to complete + + nodes = edx.get_cluster_size() + logging.info(f'Connected to {nodes:,} Elasticsearch node(s)') - if not edx.es_index: - edx.es_index = ingestor.default_index + if not edx.es_index: + edx.es_index = ingestor.default_index - map_body = ingestor.construct_map() - await edx.create_index(map_body, args.pipeline, args.replicas, args.shards) + map_body = ingestor.construct_map() + edx.create_index(map_body, args.pipeline, args.replicas, args.shards) + + batch_size = int(nodes * (args.chunk_size * args.chunk_threads)) if os.path.isfile(args.input_path): logging.info(f'Processing file: {args.input_path}') - await edx.process_data(args.input_path, ingestor.process_data) + edx.process_file(args.input_path, batch_size, ingestor.process_file) elif 
stat.S_ISFIFO(os.stat(args.input_path).st_mode): logging.info(f'Watching FIFO: {args.input_path}') - await edx.process_data(args.input_path, ingestor.process_data) + edx.process_file(args.input_path, batch_size, ingestor.process_file) elif os.path.isdir(args.input_path): count = 1 @@ -237,7 +276,7 @@ async def main(): file_path = os.path.join(args.input_path, file) if os.path.isfile(file_path): logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}') - await edx.process_data(file_path, ingestor.process_data) + edx.process_file(file_path, batch_size, ingestor.process_file) count += 1 else: logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}') @@ -245,4 +284,4 @@ async def main(): if __name__ == '__main__': - asyncio.run(main()) \ No newline at end of file + main() \ No newline at end of file diff --git a/async_dev/ingestors/__init__.py b/old/ingestors/__init__.py similarity index 100% rename from async_dev/ingestors/__init__.py rename to old/ingestors/__init__.py diff --git a/async_dev/ingestors/ingest_httpx.py b/old/ingestors/ingest_httpx.py similarity index 88% rename from async_dev/ingestors/ingest_httpx.py rename to old/ingestors/ingest_httpx.py index 93d8b58..4ea14bc 100644 --- a/async_dev/ingestors/ingest_httpx.py +++ b/old/ingestors/ingest_httpx.py @@ -4,11 +4,6 @@ import json -try: - import aiofiles -except ImportError: - raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') - default_index = 'httpx-logs' def construct_map() -> dict: @@ -27,15 +22,15 @@ def construct_map() -> dict: return mapping -async def process_data(file_path: str): +def process_file(file_path: str): ''' Read and process HTTPX records from the log file. :param file_path: Path to the HTTPX log file ''' - async with aiofiles.open(file_path, mode='r') as input_file: - async for line in input_file: + with open(file_path, 'r') as file: + for line in file: line = line.strip() if not line: @@ -48,7 +43,7 @@ async def process_data(file_path: str): del record['failed'], record['knowledgebase'], record['time'] - yield {'_index': default_index, '_source': record} + yield record return None # EOF diff --git a/async_dev/ingestors/ingest_masscan.py b/old/ingestors/ingest_masscan.py similarity index 72% rename from async_dev/ingestors/ingest_masscan.py rename to old/ingestors/ingest_masscan.py index 5eb0660..96f1802 100644 --- a/async_dev/ingestors/ingest_masscan.py +++ b/old/ingestors/ingest_masscan.py @@ -19,11 +19,6 @@ import logging import re import time -try: - import aiofiles -except ImportError: - raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') - default_index = 'masscan-logs' def construct_map() -> dict: @@ -59,15 +54,15 @@ def construct_map() -> dict: return mapping -async def process_data(file_path: str): +def process_file(file_path: str): ''' Read and process Masscan records from the log file. 
:param file_path: Path to the Masscan log file ''' - async with aiofiles.open(file_path, mode='r') as input_file: - async for line in input_file: + with open(file_path, 'r') as file: + for line in file: line = line.strip() if not line or not line.startswith('{'): @@ -79,29 +74,22 @@ async def process_data(file_path: str): try: record = json.loads(line) except json.decoder.JSONDecodeError: - # In rare cases, the JSON record may be incomplete or malformed: - # { "ip": "51.161.12.223", "timestamp": "1707628302", "ports": [ {"port": 22, "proto": "tcp", "service": {"name": "ssh", "banner": - # { "ip": "83.66.211.246", "timestamp": "1706557002" logging.error(f'Failed to parse JSON record! ({line})') - input('Press Enter to continue...') # Pause for review & debugging (Will remove pausing in production, still investigating the cause of this issue.) + input('Press Enter to continue...') # Debugging continue - if len(record['ports']) > 1: - logging.warning(f'Multiple ports found for record! ({record})') - input('Press Enter to continue...') # Pause for review (Will remove pausing in production, still investigating if you ever seen more than one port in a record.) - for port_info in record['ports']: struct = { - 'ip' : record['ip'], - 'port' : port_info['port'], - 'proto' : port_info['proto'], - 'seen' : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))), + 'ip': record['ip'], + 'port': port_info['port'], + 'proto': port_info['proto'], + 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(int(record['timestamp']))), } if 'service' in port_info: if 'name' in port_info['service']: - if (service_name := port_info['service']['name']) not in ('unknown',''): - struct['service'] = service_name + if port_info['service']['name'] != 'unknown': + struct['service'] = port_info['service']['name'] if 'banner' in port_info['service']: banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace @@ -112,7 +100,7 @@ async def process_data(file_path: str): else: struct['banner'] = banner - yield {'_index': default_index, '_source': struct} + yield struct return None # EOF @@ -143,6 +131,6 @@ Will be indexed as: "service": "ssh", "banner": "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4", "seen": "2021-10-08T02:04:28Z", - "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload, Might be useful.. + "ref_id": "?sKfOvsC4M4a2W8PaC4zF?" # TCP RST Payload (Do we need this?) } ''' \ No newline at end of file diff --git a/async_dev/ingestors/ingest_massdns.py b/old/ingestors/ingest_massdns.py similarity index 86% rename from async_dev/ingestors/ingest_massdns.py rename to old/ingestors/ingest_massdns.py index 5c8e121..fd8f3b7 100644 --- a/async_dev/ingestors/ingest_massdns.py +++ b/old/ingestors/ingest_massdns.py @@ -4,11 +4,6 @@ import time -try: - import aiofiles -except ImportError: - raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') - default_index = 'ptr-records' def construct_map() -> dict: @@ -30,15 +25,15 @@ def construct_map() -> dict: return mapping -async def process_data(file_path: str): +def process_file(file_path: str): ''' Read and process Massdns records from the log file. 
:param file_path: Path to the Massdns log file ''' - async with aiofiles.open(file_path, mode='r') as input_file: - async for line in input_file: + with open(file_path, 'r') as file: + for line in file: line = line.strip() if not line: @@ -70,7 +65,7 @@ async def process_data(file_path: str): 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) } - yield {'_index': default_index, '_source': struct} + yield struct return None # EOF diff --git a/async_dev/ingestors/ingest_zone.py b/old/ingestors/ingest_zone.py similarity index 91% rename from async_dev/ingestors/ingest_zone.py rename to old/ingestors/ingest_zone.py index 7cb552d..5c9d358 100644 --- a/async_dev/ingestors/ingest_zone.py +++ b/old/ingestors/ingest_zone.py @@ -4,11 +4,6 @@ import time -try: - import aiofiles -except ImportError: - raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)') - default_index = 'dns-zones' record_types = ('a','aaaa','caa','cdnskey','cds','cname','dnskey','ds','mx','naptr','ns','nsec','nsec3','nsec3param','ptr','rrsig','rp','sshfp','soa','srv','txt','type65534') @@ -47,7 +42,7 @@ def construct_map() -> dict: return mapping -async def process_data(file_path: str): +def process_file(file_path: str): ''' Read and process zone file records. @@ -57,8 +52,8 @@ async def process_data(file_path: str): domain_records = {} last_domain = None - async with aiofiles.open(file_path, mode='r') as input_file: - async for line in input_file: + with open(file_path, 'r') as file: + for line in file: line = line.strip() if not line or line.startswith(';'): @@ -93,11 +88,11 @@ async def process_data(file_path: str): if domain != last_domain: if last_domain: - struct = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())} + source = {'domain': last_domain, 'records': domain_records[last_domain], 'seen': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())} del domain_records[last_domain] - yield {'_index': default_index, '_source': struct} + yield source last_domain = domain diff --git a/async_dev/sniff_patch.py b/old/sniff_patch.py similarity index 72% rename from async_dev/sniff_patch.py rename to old/sniff_patch.py index 0500cf0..70caa2f 100644 --- a/async_dev/sniff_patch.py +++ b/old/sniff_patch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris) -# sniff_patch.py [asyncronous developement] +# sniff_patch.py # Note: # This is a patch for the elasticsearch 8.x client to fix the sniff_* options. @@ -12,23 +12,23 @@ import base64 -import elasticsearch._async.client as async_client +import elasticsearch._sync.client as client from elasticsearch.exceptions import SerializationError, ConnectionError -async def init_elasticsearch_async(*args, **kwargs): +def init_elasticsearch(*args, **kwargs): ''' - Initialize the Async Elasticsearch client with the sniff patch. + Initialize the Elasticsearch client with the sniff patch. - :param args: Async Elasticsearch positional arguments. - :param kwargs: Async Elasticsearch keyword arguments. + :param args: Elasticsearch positional arguments. + :param kwargs: Elasticsearch keyword arguments. 
''' - async_client.default_sniff_callback = _override_async_sniff_callback(kwargs['basic_auth']) + client.default_sniff_callback = _override_sniff_callback(kwargs['basic_auth']) - return async_client.AsyncElasticsearch(*args, **kwargs) + return client.Elasticsearch(*args, **kwargs) -def _override_async_sniff_callback(basic_auth): +def _override_sniff_callback(basic_auth): ''' Taken from https://github.com/elastic/elasticsearch-py/blob/8.8/elasticsearch/_sync/client/_base.py#L166 Completely unmodified except for adding the auth header to the elastic request. @@ -38,19 +38,19 @@ def _override_async_sniff_callback(basic_auth): - https://github.com/elastic/elasticsearch-py/issues/2005 ''' auth_str = base64.b64encode(':'.join(basic_auth).encode()).decode() - sniffed_node_callback = async_client._base._default_sniffed_node_callback + sniffed_node_callback = client._base._default_sniffed_node_callback - async def modified_async_sniff_callback(transport, sniff_options): + def modified_sniff_callback(transport, sniff_options): for _ in transport.node_pool.all(): try: - meta, node_infos = await transport.perform_request( + meta, node_infos = transport.perform_request( 'GET', '/_nodes/_all/http', - headers={ + headers = { 'accept': 'application/vnd.elasticsearch+json; compatible-with=8', - 'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s + 'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s }, - request_timeout=( + request_timeout = ( sniff_options.sniff_timeout if not sniff_options.is_initial_sniff else None @@ -79,7 +79,7 @@ def _override_async_sniff_callback(basic_auth): port = int(port_str) assert sniffed_node_callback is not None - sniffed_node = await sniffed_node_callback( + sniffed_node = sniffed_node_callback( node_info, meta.node.replace(host=host, port=port) ) if sniffed_node is None: @@ -93,4 +93,4 @@ def _override_async_sniff_callback(basic_auth): return [] - return modified_async_sniff_callback \ No newline at end of file + return modified_sniff_callback \ No newline at end of file diff --git a/sniff_patch.py b/sniff_patch.py index 70caa2f..0500cf0 100644 --- a/sniff_patch.py +++ b/sniff_patch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris) -# sniff_patch.py +# sniff_patch.py [asynchronous development] # Note: # This is a patch for the elasticsearch 8.x client to fix the sniff_* options. @@ -12,23 +12,23 @@ import base64 -import elasticsearch._sync.client as client +import elasticsearch._async.client as async_client from elasticsearch.exceptions import SerializationError, ConnectionError -def init_elasticsearch(*args, **kwargs): +async def init_elasticsearch_async(*args, **kwargs): ''' - Initialize the Elasticsearch client with the sniff patch. + Initialize the Async Elasticsearch client with the sniff patch. - :param args: Elasticsearch positional arguments. - :param kwargs: Elasticsearch keyword arguments. + :param args: Async Elasticsearch positional arguments. + :param kwargs: Async Elasticsearch keyword arguments.
''' - client.default_sniff_callback = _override_sniff_callback(kwargs['basic_auth']) + async_client.default_sniff_callback = _override_async_sniff_callback(kwargs['basic_auth']) - return client.Elasticsearch(*args, **kwargs) + return async_client.AsyncElasticsearch(*args, **kwargs) -def _override_sniff_callback(basic_auth): +def _override_async_sniff_callback(basic_auth): ''' Taken from https://github.com/elastic/elasticsearch-py/blob/8.8/elasticsearch/_sync/client/_base.py#L166 Completely unmodified except for adding the auth header to the elastic request. @@ -38,19 +38,19 @@ def _override_sniff_callback(basic_auth): - https://github.com/elastic/elasticsearch-py/issues/2005 ''' auth_str = base64.b64encode(':'.join(basic_auth).encode()).decode() - sniffed_node_callback = client._base._default_sniffed_node_callback + sniffed_node_callback = async_client._base._default_sniffed_node_callback - def modified_sniff_callback(transport, sniff_options): + async def modified_async_sniff_callback(transport, sniff_options): for _ in transport.node_pool.all(): try: - meta, node_infos = transport.perform_request( + meta, node_infos = await transport.perform_request( 'GET', '/_nodes/_all/http', - headers = { + headers={ 'accept': 'application/vnd.elasticsearch+json; compatible-with=8', - 'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s + 'authorization': f'Basic {auth_str}' # This auth header is missing in 8.x releases of the client, and causes 401s }, - request_timeout = ( + request_timeout=( sniff_options.sniff_timeout if not sniff_options.is_initial_sniff else None @@ -79,7 +79,7 @@ def _override_sniff_callback(basic_auth): port = int(port_str) assert sniffed_node_callback is not None - sniffed_node = sniffed_node_callback( + sniffed_node = await sniffed_node_callback( node_info, meta.node.replace(host=host, port=port) ) if sniffed_node is None: @@ -93,4 +93,4 @@ def _override_sniff_callback(basic_auth): return [] - return modified_sniff_callback \ No newline at end of file + return modified_async_sniff_callback \ No newline at end of file
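
Note on the pattern used throughout the patch above: each ingestor is now an async generator that yields ready-made bulk actions ({'_index': ..., '_source': ...}), and elasticsearch.helpers.async_streaming_bulk consumes that generator lazily, flushing by record count (chunk_size) and payload size (max_chunk_bytes) without ever materialising a full batch in memory, which is what replaces the old bulk_index()/parallel_bulk batching. Below is a minimal, self-contained sketch of the same pattern, separate from the patch itself: the host, index name, and input file are placeholders, and raise_on_error=False is an added assumption so that failed actions come back through the (ok, result) tuples instead of raising.

#!/usr/bin/env python
# async_bulk_sketch.py - illustrative sketch only, not part of ERIS

import asyncio
import json

import aiofiles
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk


async def generate_actions(file_path: str):
    '''Yield one bulk action per JSON line in the input file.'''
    async with aiofiles.open(file_path, mode='r') as input_file:
        async for line in input_file:
            line = line.strip()

            if not line:
                continue

            yield {'_index': 'example-logs', '_source': json.loads(line)} # Placeholder index name


async def ingest(file_path: str):
    es = AsyncElasticsearch(hosts=['http://localhost:9200']) # Placeholder host

    try:
        # raise_on_error=False keeps failed actions in the (ok, result) stream instead of raising a BulkIndexError after the chunk is flushed.
        async for ok, result in async_streaming_bulk(es, actions=generate_actions(file_path), chunk_size=1000, max_chunk_bytes=10*1024*1024, raise_on_error=False):
            if not ok:
                print(f'Failed action: {result}')
    finally:
        await es.close()


if __name__ == '__main__':
    asyncio.run(ingest('example.json')) # Placeholder input file

On the api_key question flagged in the patch ('Verify this is correct'): the 8.x Python client accepts api_key either as the already-encoded key string or as an (id, api_key) tuple, so (args.api_key, '') would treat the whole value as the id with an empty key; if args.api_key holds the encoded key, as the previous ApiKey header suggests, then passing it directly as api_key=args.api_key is the likelier intent, though this is worth verifying against the client version in use.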