Code cleanup

Dionysus 2024-03-11 22:33:18 -04:00
parent b018da4e4d
commit 87f2cf27ea
Signed by: acidvegas
GPG Key ID: EF4B922DB85DC9DE
3 changed files with 352 additions and 421 deletions

View File

@@ -5,85 +5,30 @@
 import asyncio
 import json
 import logging
+import time

 try:
 	import websockets
 except ImportError:
 	raise ImportError('Missing required \'websockets\' library. (pip install websockets)')

+# Set a default elasticsearch index if one is not provided
 default_index = 'cert-stream'

 def construct_map() -> dict:
 	'''Construct the Elasticsearch index mapping for Certstream records.'''

+	# Match on exact value or full text search
 	keyword_mapping = { 'type': 'text', 'fields': { 'keyword': { 'type': 'keyword', 'ignore_above': 256 } } }

+	# Construct the index mapping
 	mapping = {
 		'mappings': {
-			'properties': {
-				'data': {
-					'properties': {
-						'cert_index': { 'type': 'integer' },
-						'cert_link' : { 'type': 'keyword' },
-						'leaf_cert' : {
-							'properties': {
-								'all_domains': { 'type': 'keyword' },
-								'extensions': {
-									'properties': {
-										'authorityInfoAccess'    : { 'type': 'text' },
-										'authorityKeyIdentifier' : { 'type': 'text' },
-										'basicConstraints'       : { 'type': 'text' },
-										'certificatePolicies'    : { 'type': 'text' },
-										'crlDistributionPoints'  : { 'type': 'text' },
-										'ctlPoisonByte'          : { 'type': 'boolean' },
-										'extendedKeyUsage'       : { 'type': 'text' },
-										'keyUsage'               : { 'type': 'text' },
-										'subjectAltName'         : { 'type': 'text' },
-										'subjectKeyIdentifier'   : { 'type': 'text' }
-									}
-								},
-								'fingerprint': { 'type': 'keyword' },
-								'issuer': {
-									'properties': {
-										'C'            : { 'type': 'keyword' },
-										'CN'           : { 'type': 'text' },
-										'L'            : { 'type': 'text' },
-										'O'            : { 'type': 'text' },
-										'OU'           : { 'type': 'text' },
-										'ST'           : { 'type': 'text' },
-										'aggregated'   : { 'type': 'text' },
-										'emailAddress' : { 'type': 'text' }
-									}
-								},
-								'not_after'           : { 'type': 'integer' },
-								'not_before'          : { 'type': 'integer' },
-								'serial_number'       : { 'type': 'keyword' },
-								'signature_algorithm' : { 'type': 'text' },
-								'subject': {
-									'properties': {
-										'C'            : { 'type': 'keyword' },
-										'CN'           : { 'type': 'text' },
-										'L'            : { 'type': 'text' },
-										'O'            : { 'type': 'text' },
-										'OU'           : { 'type': 'text' },
-										'ST'           : { 'type': 'text' },
-										'aggregated'   : { 'type': 'text' },
-										'emailAddress' : { 'type': 'text' }
-									}
-								}
-							}
-						},
-						'seen': { 'type': 'date', 'format': 'epoch_second' },
-						'source': {
-							'properties': {
-								'name' : { 'type': 'keyword' },
-								'url'  : { 'type': 'keyword' }
-							}
-						},
-						'update_type': { 'type': 'keyword' }
-					}
-				},
-				'message_type': { 'type': 'keyword' }
+			'properties' : {
+				'domain' : keyword_mapping,
+				'seen'   : { 'type': 'date' }
 			}
 		}
 	}
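
For context, the simplified mapping above is what the ingestor expects the index to use. A minimal sketch of applying it with the official elasticsearch-py client (the client setup and connection details below are assumptions, not part of this commit):

from elasticsearch import Elasticsearch

es = Elasticsearch('https://localhost:9200') # Connection details are an assumption
es.indices.create(index=default_index, mappings=construct_map()['mappings'])
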
@@ -100,21 +45,30 @@ async def process_data(place_holder: str = None):
 	while True:
 		try:
-			async with websockets.connect('wss://certstream.calidog.io/') as websocket:
+			async with websockets.connect('wss://certstream.calidog.io') as websocket:
 				while True:
+					# Read a line from the websocket
 					line = await websocket.recv()

-					if line == '~eof': # Sentinel value to indicate the end of a process (Used with --watch with FIFO)
-						break
-
+					# Parse the JSON record
 					try:
 						record = json.loads(line)
 					except json.decoder.JSONDecodeError:
-						logging.error(f'Failed to parse JSON record from Certstream! ({line})')
+						logging.error(f'Invalid line from the websocket: {line}')
+						input('Press Enter to continue...')
 						continue

-					yield record
+					# Grab the unique domains from the record (excluding wildcards)
+					domains = record['data']['leaf_cert']['all_domains']
+					domains = set([domain[2:] if domain.startswith('*.') else domain for domain in domains])
+
+					# Construct the document
+					for domain in domains:
+						struct = {
+							'domain' : domain,
+							'seen'   : time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+						}
+
+						yield {'_id': id, '_index': default_index, '_source': struct}

 		except websockets.ConnectionClosed:
 			logging.error('Connection to Certstream was closed. Attempting to reconnect...')
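
Since the generator above yields ready-made bulk actions ('_id', '_index', '_source'), it can be fed straight into the async bulk helpers from elasticsearch-py. A rough sketch, assuming an AsyncElasticsearch client (the wiring below is illustrative and not part of this commit):

import asyncio
import logging

from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk

async def ingest():
	es = AsyncElasticsearch('https://localhost:9200') # Connection details are an assumption
	async for ok, result in async_streaming_bulk(es, process_data(), raise_on_error=False):
		if not ok:
			logging.error(f'Failed to index document: {result}')
	await es.close()

asyncio.run(ingest())
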
@@ -122,40 +76,11 @@ async def process_data(place_holder: str = None):
 		except Exception as e:
 			logging.error(f'An error occurred while processing Certstream records! ({e})')
-			await asyncio.sleep(15)
-
-
-async def strip_struct_empty(data: dict) -> dict:
-	'''
-	Recursively remove empty values from a nested dictionary or list.
-
-	:param data: The dictionary or list to clean.
-	'''
-
-	empties = [None, '', [], {}]
-
-	if isinstance(data, dict):
-		for key, value in list(data.items()):
-			if value in empties:
-				del data[key]
-			else:
-				cleaned_value = strip_struct_empty(value)
-				if cleaned_value in empties:
-					del data[key]
-				else:
-					data[key] = cleaned_value
-
-		return data
-
-	elif isinstance(data, list):
-		return [strip_struct_empty(item) for item in data if item not in empties and strip_struct_empty(item) not in empties]
-
-	else:
-		return data
+			break


 async def test():
-	'''Test the Cert stream ingestion process'''
+	'''Test the ingestion process.'''

 	async for document in process_data():
 		print(document)
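
For reference, the _source built for each domain is a flat two-field document along these lines (example values, not real output):

{'domain': 'www.example.com', 'seen': '2024-03-11T22:33:18Z'}
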
@@ -163,14 +88,9 @@ async def test():

 if __name__ == '__main__':
-	import argparse
 	import asyncio

-	parser = argparse.ArgumentParser(description='Certstream Ingestor for ERIS')
-	parser.add_argument('input_path', help='Path to the input file or directory')
-	args = parser.parse_args()
-
-	asyncio.run(test(args.input_path))
+	asyncio.run(test())

View File

@@ -11,15 +11,20 @@ except ImportError:
 	raise ImportError('Missing required \'aiofiles\' library. (pip install aiofiles)')

+# Set a default elasticsearch index if one is not provided
 default_index = 'dns-zones'
+
+# Known DNS record types found in zone files
 record_types = ('a','aaaa','caa','cdnskey','cds','cname','dnskey','ds','mx','naptr','ns','nsec','nsec3','nsec3param','ptr','rrsig','rp','sshfp','soa','srv','txt','type65534')

 def construct_map() -> dict:
 	'''Construct the Elasticsearch index mapping for zone file records.'''

+	# Match on exact value or full text search
 	keyword_mapping = { 'type': 'text', 'fields': { 'keyword': { 'type': 'keyword', 'ignore_above': 256 } } }

+	# Construct the index mapping
 	mapping = {
 		'mappings': {
 			'properties': {
@@ -31,18 +36,11 @@ def construct_map() -> dict:
 	}

 	# Add record types to mapping dynamically to not clutter the code
-	for item in record_types:
-		if item in ('a','aaaa'):
-			mapping['mappings']['properties']['records']['properties'][item] = {
-				'properties': {
-					'data': { 'type': 'ip' },
-					'ttl': { 'type': 'integer' }
-				}
-			}
-		else:
-			mapping['mappings']['properties']['records']['properties'][item] = {
-				'properties': {
-					'data': keyword_mapping,
+	for record_type in record_types:
+		if record_type in ('a','aaaa'):
+			mapping['mappings']['properties']['records']['properties'][record_type] = {
+				'properties': {
+					'data': { 'type': 'ip' if record_type in ('a','aaaa') else keyword_mapping },
 					'ttl': { 'type': 'integer' }
 				}
 			}
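
For an address record such as 'a' or 'aaaa', the loop above generates a mapping entry shaped like this (a sketch of the resulting fragment):

{
	'properties': {
		'data': { 'type': 'ip' },
		'ttl': { 'type': 'integer' }
	}
}
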
@@ -52,43 +50,53 @@ def construct_map() -> dict:

 async def process_data(file_path: str):
 	'''
-	Read and process zone file records.
+	Read and process the input file

-	:param file_path: Path to the zone file
+	:param input_path: Path to the input file
 	'''

 	async with aiofiles.open(file_path) as input_file:
+		# Initialize the cache
 		last = None

+		# Read the input file line by line
 		async for line in input_file:
 			line = line.strip()

-			if line == '~eof': # Sentinel value to indicate the end of a process (Used with --watch with FIFO)
-				return last
+			# Sentinel value to indicate the end of a process (for closing out a FIFO stream)
+			if line == '~eof':
+				yield last
+				break

+			# Skip empty lines and comments
 			if not line or line.startswith(';'):
 				continue

+			# Split the line into its parts
 			parts = line.split()

+			# Ensure the line has at least 3 parts
 			if len(parts) < 5:
 				logging.warning(f'Invalid line: {line}')
+				continue

+			# Split the record into its parts
 			domain, ttl, record_class, record_type, data = parts[0].rstrip('.').lower(), parts[1], parts[2].lower(), parts[3].lower(), ' '.join(parts[4:])

+			# Ensure the TTL is a number
 			if not ttl.isdigit():
 				logging.warning(f'Invalid TTL: {ttl} with line: {line}')
 				continue
-			else:
-				ttl = int(ttl)
+			ttl = int(ttl)

-			# Anomaly...Doubtful any CHAOS/HESIOD records will be found in zone files
+			# Do not index other record classes (doubtful any CHAOS/HESIOD records will be found in zone files)
 			if record_class != 'in':
 				logging.warning(f'Unsupported record class: {record_class} with line: {line}')
 				continue

-			# We do not want to collide with our current mapping (Again, this is an anomaly)
+			# Do not index other record types
 			if record_type not in record_types:
 				logging.warning(f'Unsupported record type: {record_type} with line: {line}')
 				continue
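
As a worked example of the parsing above, a single NS line splits into the following fields (values are made up):

line  = 'example.com. 86400 in ns ns1.example.com.'
parts = line.split()
# parts[0].rstrip('.').lower() -> 'example.com'       (domain)
# parts[1]                     -> '86400'              (ttl, cast to int after the isdigit check)
# parts[2].lower()             -> 'in'                 (record_class)
# parts[3].lower()             -> 'ns'                 (record_type)
# ' '.join(parts[4:])          -> 'ns1.example.com.'   (data)
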
@@ -101,16 +109,18 @@ async def process_data(file_path: str):

 			elif data.endswith('.'):
 				data = data.rstrip('.')

+			# Check if we are still processing the same domain
 			if last:
-				if domain == last['domain']:
+				if domain == last['domain']: # This record is for the same domain as the cached document
 					if record_type in last['_doc']['records']:
 						last['_doc']['records'][record_type].append({'ttl': ttl, 'data': data}) # Do we need to check for duplicate records?
 					else:
 						last['_doc']['records'][record_type] = [{'ttl': ttl, 'data': data}]
 					continue
 				else:
-					yield last
+					yield last # Return the last document and start a new one

+			# Cache the document
 			last = {
 				'_op_type' : 'update',
 				'_id'      : domain,
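
To illustrate the caching above: after the loop has seen one A record and two NS records for the same domain, the cached document's records section looks like this (example values):

{
	'a' : [ {'ttl': 3600, 'data': '203.0.113.7'} ],
	'ns': [ {'ttl': 86400, 'data': 'ns1.example.com'}, {'ttl': 86400, 'data': 'ns2.example.com'} ]
}
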
@@ -126,10 +136,11 @@ async def process_data(file_path: str):

 async def test(input_path: str):
 	'''
-	Test the Zone file ingestion process
+	Test the ingestion process

-	:param input_path: Path to the MassDNS log file
+	:param input_path: Path to the input file
 	'''

 	async for document in process_data(input_path):
 		print(document)
@@ -139,7 +150,7 @@ if __name__ == '__main__':

 	import argparse
 	import asyncio

-	parser = argparse.ArgumentParser(description='Zone file Ingestor for ERIS')
+	parser = argparse.ArgumentParser(description='Ingestor for ERIS')
 	parser.add_argument('input_path', help='Path to the input file or directory')
 	args = parser.parse_args()