Asynchronous development on ERIS is complete; need to refactor the ingestion helpers before pushing this as the main version
commit b6fb68ba3a (parent 31287a99d2)
@@ -86,6 +86,7 @@ Create & add a geoip pipeline and use the following in your index mappings:
 ## Roadmap
 - Implement [async elasticsearch](https://elasticsearch-py.readthedocs.io/en/v8.12.1/async.html) into the code.
 - WHOIS database ingestion scripts
+- Dynamically update the batch metrics when the sniffer adds or removes nodes

 ___

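For context on the first roadmap item: `elasticsearch-py` ships an `AsyncElasticsearch` client whose calls are awaited inside an event loop, which is what this commit wires in. A minimal sketch of the client itself, with a placeholder host and credentials rather than anything from this repo:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def demo():
    # Placeholder connection details; ERIS builds its own from CLI arguments.
    es = AsyncElasticsearch(hosts=['https://localhost:9200'], basic_auth=('elastic', 'changeme'))

    # Every API call on the async client must be awaited.
    info = await es.info()
    print(info['version']['number'])

    await es.close()

asyncio.run(demo())
```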
@@ -2,11 +2,11 @@
 # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
 # eris.py [asyncronous developement]

+import asyncio
 import argparse
 import logging
 import os
 import stat
-import time
 import sys

 sys.dont_write_bytecode = True
@@ -31,12 +31,10 @@ class ElasticIndexer:
 '''

 self.chunk_size = args.chunk_size
-self.chunk_threads = args.chunk_threads
-self.dry_run = args.dry_run
+self.es = None
 self.es_index = args.index

-if not args.dry_run:
-es_config = {
+self.es_config = {
 'hosts': [f'{args.host}:{args.port}'],
 'verify_certs': args.self_signed,
 'ssl_show_warn': args.self_signed,
@@ -49,23 +47,28 @@ class ElasticIndexer:
 }

 if args.api_key:
-es_config['api_key'] = (args.key, '') # Verify this is correct
+self.es_config['api_key'] = (args.api_key, '') # Verify this is correct
 else:
-es_config['basic_auth'] = (args.user, args.password)
+self.es_config['basic_auth'] = (args.user, args.password)


+async def initialize(self):
+'''Initialize the Elasticsearch client.'''
+
 # Patching the Elasticsearch client to fix a bug with sniffing (https://github.com/elastic/elasticsearch-py/issues/2005#issuecomment-1645641960)
 import sniff_patch
-self.es = sniff_patch.init_elasticsearch(**es_config)
+self.es = sniff_patch.init_elasticsearch(**self.es_config)

 # Remove the above and uncomment the below if the bug is fixed in the Elasticsearch client:
 #self.es = AsyncElasticsearch(**es_config)


-async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1, ):
+async def create_index(self, map_body: dict, pipeline: str = '', replicas: int = 1, shards: int = 1):
 '''
 Create the Elasticsearch index with the defined mapping.

-:param pipline: Name of the ingest pipeline to use for the index
+:param map_body: Mapping for the index
+:param pipeline: Name of the ingest pipeline to use for the index
 :param replicas: Number of replicas for the index
 :param shards: Number of shards for the index
 '''
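The hunk above splits construction in two: `__init__` only records `self.es_config`, and the new `initialize()` coroutine builds the client later, once an event loop is running (here via the repo's `sniff_patch` workaround). A stripped-down sketch of the same pattern using the stock `AsyncElasticsearch` client instead of `sniff_patch`; the class and argument names below are illustrative, not the repo's:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


class Indexer:
    def __init__(self, host: str, api_key=None):
        # __init__ only records configuration; no client is built yet.
        self.es_config = {'hosts': [host]}
        if api_key:
            self.es_config['api_key'] = api_key
        self.es = None

    async def initialize(self):
        '''Create the async client inside a running event loop.'''
        self.es = AsyncElasticsearch(**self.es_config)


async def demo():
    indexer = Indexer('https://localhost:9200')  # placeholder host
    await indexer.initialize()
    print(await indexer.es.ping())
    await indexer.es.close()

asyncio.run(demo())
```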
@@ -112,7 +115,7 @@ class ElasticIndexer:
 return number_of_nodes


-async def async_bulk_index_data(self, file_path: str, index_name: str, data_generator: callable):
+async def process_data(self, file_path: str, data_generator: callable):
 '''
 Index records in chunks to Elasticsearch.

@@ -124,11 +127,11 @@ class ElasticIndexer:
 count = 0
 total = 0

-async for ok, result in async_streaming_bulk(self.es, index_name=self.es_index, actions=data_generator(file_path), chunk_size=self.chunk_size):
+async for ok, result in async_streaming_bulk(self.es, actions=data_generator(file_path), chunk_size=self.chunk_size):
 action, result = result.popitem()

 if not ok:
-logging.error(f'Failed to index document ({result["_id"]}) to {index_name} from {file_path} ({result})')
+logging.error(f'Failed to index document ({result["_id"]}) to {self.es_index} from {file_path} ({result})')
 input('Press Enter to continue...') # Debugging (will possibly remove this since we have retries enabled)
 continue

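The call above relies on `async_streaming_bulk` from `elasticsearch.helpers`, which consumes an async generator of actions and yields one `(ok, result)` pair per document; because each action can name its own `_index`, the old `index_name=` keyword is no longer needed. A self-contained sketch with made-up documents and a placeholder host:

```python
import asyncio

from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk


async def generate_actions():
    # Stand-in for an ingestor's async generator of bulk actions.
    for ip in ('192.0.2.1', '192.0.2.2'):
        yield {'_index': 'demo-index', '_source': {'ip': ip}}


async def demo():
    es = AsyncElasticsearch(hosts=['https://localhost:9200'])  # placeholder host

    async for ok, result in async_streaming_bulk(es, actions=generate_actions(), chunk_size=500):
        action, response = result.popitem()
        if not ok:
            print(f'Failed to index: {response}')

    await es.close()

asyncio.run(demo())
```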
@@ -139,41 +142,16 @@ class ElasticIndexer:
 logging.info(f'Successfully indexed {self.chunk_size:,} ({total:,} processed) records to {self.es_index} from {file_path}')
 count = 0

-logging.info(f'Finished indexing {self.total:,} records to {self.es_index} from {file_path}')
+logging.info(f'Finished indexing {total:,} records to {self.es_index} from {file_path}')


-async def process_file(self, file_path: str, ingest_function: callable):
-'''
-Read and index records in batches to Elasticsearch.
-
-:param file_path: Path to the file
-:param batch_size: Number of records to index per batch
-:param ingest_function: Function to process the file
-'''
-
-count = 0
-
-async for processed in ingest_function(file_path):
-if not processed:
-break
-
-if self.dry_run:
-print(processed)
-continue
-
-count += 1
-
-yield {'_index': self.es_index, '_source': processed}
-
-
-def main():
+async def main():
 '''Main function when running this script directly.'''

 parser = argparse.ArgumentParser(description='Index data into Elasticsearch.')

 # General arguments
 parser.add_argument('input_path', help='Path to the input file or directory') # Required
-parser.add_argument('--dry-run', action='store_true', help='Dry run (do not index records to Elasticsearch)')
 parser.add_argument('--watch', action='store_true', help='Create or watch a FIFO for real-time indexing')

 # Elasticsearch arguments
@@ -192,7 +170,6 @@ def main():

 # Performance arguments
 parser.add_argument('--chunk-size', type=int, default=50000, help='Number of records to index in a chunk')
-parser.add_argument('--chunk-threads', type=int, default=3, help='Number of threads to use when indexing in chunks')
 parser.add_argument('--retries', type=int, default=60, help='Number of times to retry indexing a chunk before failing')
 parser.add_argument('--timeout', type=int, default=30, help='Number of seconds to wait before retrying a chunk')

@@ -214,6 +191,7 @@ def main():
 raise FileNotFoundError(f'Input path {args.input_path} does not exist or is not a file or directory')

 edx = ElasticIndexer(args)
+await edx.initialize() # Initialize the Elasticsearch client asyncronously

 if args.cert:
 from ingestors import ingest_certs as ingestor
@@ -226,31 +204,27 @@ def main():
 elif args.zone:
 from ingestors import ingest_zone as ingestor

-batch_size = 0
-
-if not args.dry_run:
-print(edx.get_cluster_health())
-
-time.sleep(3) # Delay to allow time for sniffing to complete
-
-nodes = edx.get_cluster_size()
+health = await edx.get_cluster_health()
+print(health)
+
+await asyncio.sleep(5) # Delay to allow time for sniffing to complete
+
+nodes = await edx.get_cluster_size()
 logging.info(f'Connected to {nodes:,} Elasticsearch node(s)')

 if not edx.es_index:
 edx.es_index = ingestor.default_index

 map_body = ingestor.construct_map()
-edx.create_index(map_body, args.pipeline, args.replicas, args.shards)
+await edx.create_index(map_body, args.pipeline, args.replicas, args.shards)

-batch_size = int(nodes * (args.chunk_size * args.chunk_threads))
-
 if os.path.isfile(args.input_path):
 logging.info(f'Processing file: {args.input_path}')
-edx.process_file(args.input_path, batch_size, ingestor.process_file)
+await edx.process_data(args.input_path, ingestor.process_data)

 elif stat.S_ISFIFO(os.stat(args.input_path).st_mode):
 logging.info(f'Watching FIFO: {args.input_path}')
-edx.process_file(args.input_path, batch_size, ingestor.process_file)
+await edx.process_data(args.input_path, ingestor.process_data)

 elif os.path.isdir(args.input_path):
 count = 1
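`get_cluster_health()` and `get_cluster_size()` are the repo's own helpers and their bodies are not part of this diff; presumably they wrap standard client calls along these lines (an assumption, sketched against the stock async client):

```python
from elasticsearch import AsyncElasticsearch


async def cluster_summary(es: AsyncElasticsearch) -> int:
    '''Print cluster health and return the node count, roughly what main() does above.'''
    health = await es.cluster.health()  # response includes status, shard counts, number_of_nodes, etc.
    print(health)
    return health['number_of_nodes']
```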
@@ -260,7 +234,7 @@ def main():
 file_path = os.path.join(args.input_path, file)
 if os.path.isfile(file_path):
 logging.info(f'[{count:,}/{total:,}] Processing file: {file_path}')
-edx.process_file(file_path, batch_size, ingestor.process_file)
+await edx.process_data(file_path, ingestor.process_data)
 count += 1
 else:
 logging.warning(f'[{count:,}/{total:,}] Skipping non-file: {file_path}')
@@ -268,4 +242,4 @@ def main():


 if __name__ == '__main__':
-main()
+asyncio.run(main())
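With `main()` now a coroutine, the entry point switches from a plain call to `asyncio.run()`, which creates the event loop, runs `main()` to completion, and closes the loop. The general shape, with the body reduced to comments:

```python
import asyncio


async def main():
    # Parse arguments, then await each async step in order, e.g.:
    #   edx = ElasticIndexer(args); await edx.initialize(); await edx.process_data(...)
    ...


if __name__ == '__main__':
    asyncio.run(main())
```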
@@ -91,8 +91,12 @@ def construct_map() -> dict:
 return mapping


-async def process():
-'''Read and process Certsream records live from the Websocket stream.'''
+async def process_data(file_path: str = None):
+'''
+Read and process Certsream records live from the Websocket stream.
+
+:param file_path: Path to the Certstream log file (unused, placeholder for consistency with other ingestors)
+'''

 while True:
 try:
@@ -105,6 +109,7 @@ async def process():
 record = json.loads(line)
 except json.decoder.JSONDecodeError:
 logging.error(f'Failed to parse JSON record from Certstream! ({line})')
+input('Press Enter to continue...') # Pause the script to allow the user to read the error message
 continue

 yield record
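Unlike the file-based ingestors, the Certstream ingestor yields records from a live websocket, which is why `file_path` is only kept for interface parity. A rough sketch of such an async generator; the `websockets` package and the `certstream.calidog.io` endpoint are assumptions here, not necessarily what the real ingestor uses:

```python
import json
import logging

import websockets  # assumed third-party dependency: pip install websockets


async def process_data(file_path: str = None):
    '''Yield Certstream records from the websocket; file_path is unused, kept for interface parity.'''
    while True:
        try:
            # The endpoint below is an assumption for illustration.
            async with websockets.connect('wss://certstream.calidog.io/') as ws:
                async for line in ws:
                    try:
                        record = json.loads(line)
                    except json.decoder.JSONDecodeError:
                        logging.error(f'Failed to parse JSON record from Certstream! ({line})')
                        continue
                    yield record
        except websockets.ConnectionClosed:
            logging.warning('Certstream connection closed, reconnecting...')
```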
@@ -59,7 +59,7 @@ def construct_map() -> dict:
 return mapping


-async def process_file(file_path: str):
+async def process_data(file_path: str):
 '''
 Read and process Masscan records from the log file.

@@ -79,10 +79,17 @@ async def process_file(file_path: str):
 try:
 record = json.loads(line)
 except json.decoder.JSONDecodeError:
+# In rare cases, the JSON record may be incomplete or malformed:
+# { "ip": "51.161.12.223", "timestamp": "1707628302", "ports": [ {"port": 22, "proto": "tcp", "service": {"name": "ssh", "banner":
+# { "ip": "83.66.211.246", "timestamp": "1706557002"
 logging.error(f'Failed to parse JSON record! ({line})')
-input('Press Enter to continue...') # Debugging
+input('Press Enter to continue...') # Pause for review & debugging (Will remove pausing in production, still investigating the cause of this issue.)
 continue

+if len(record['ports']) > 1:
+logging.warning(f'Multiple ports found for record! ({record})')
+input('Press Enter to continue...') # Pause for review (Will remove pausing in production, still investigating if you ever seen more than one port in a record.)
+
 for port_info in record['ports']:
 struct = {
 'ip' : record['ip'],
@@ -93,8 +100,8 @@ async def process_file(file_path: str):

 if 'service' in port_info:
 if 'name' in port_info['service']:
-if port_info['service']['name'] != 'unknown':
-struct['service'] = port_info['service']['name']
+if (service_name := port_info['service']['name']) not in ('unknown',''):
+struct['service'] = service_name

 if 'banner' in port_info['service']:
 banner = ' '.join(port_info['service']['banner'].split()) # Remove extra whitespace
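The walrus-operator change above binds the looked-up service name once, reuses it without a second dictionary lookup, and now also skips empty strings alongside 'unknown'. Isolated on a made-up record fragment:

```python
port_info = {'port': 22, 'proto': 'tcp', 'service': {'name': 'ssh'}}  # sample fragment
struct = {}

# := binds the name so it can be reused; 'unknown' and '' both mean "no service identified".
if (service_name := port_info['service']['name']) not in ('unknown', ''):
    struct['service'] = service_name

print(struct)  # {'service': 'ssh'}
```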