Testing function added to every ingestor to debug directly. No more --dry-run needed.

This commit is contained in:
Dionysus 2024-03-07 23:31:30 -05:00
parent b78b99e060
commit 45f878285c
Signed by: acidvegas
GPG Key ID: EF4B922DB85DC9DE
6 changed files with 273 additions and 193 deletions

View File

@ -8,6 +8,7 @@ This is a suite of tools to aid in the ingestion of recon data from various sourc
- [elasticsearch](https://pypi.org/project/elasticsearch/) *(`pip install elasticsearch`)* - [elasticsearch](https://pypi.org/project/elasticsearch/) *(`pip install elasticsearch`)*
- [aiofiles](https://pypi.org/project/aiofiles) *(`pip install aiofiles`)* - [aiofiles](https://pypi.org/project/aiofiles) *(`pip install aiofiles`)*
- [aiohttp](https://pypi.org/project/aiohttp) *(`pip install aiohttp`)* - [aiohttp](https://pypi.org/project/aiohttp) *(`pip install aiohttp`)*
- [websockets](https://pypi.org/project/websockets/) *(`pip install websockets`) (only required for `--certs` ingestion)*
## Usage ## Usage
```shell ```shell

View File

@ -91,11 +91,11 @@ def construct_map() -> dict:
return mapping return mapping
async def process_data(file_path: str = None): async def process_data(place_holder: str = None):
''' '''
Read and process Certstream records live from the Websocket stream. Read and process Certstream records live from the Websocket stream.
:param file_path: Path to the Certstream log file (unused, placeholder for consistency with other ingestors) :param place_holder: Placeholder parameter to match the process_data function signature of other ingestors.
''' '''
while True: while True:
@ -154,9 +154,28 @@ async def strip_struct_empty(data: dict) -> dict:
return data return data
async def test():
'''Test the Cert stream ingestion process'''
async for document in process_data():
print(document)
if __name__ == '__main__':
import argparse
import asyncio
parser = argparse.ArgumentParser(description='Certstream Ingestor for ERIS')
parser.add_argument('input_path', help='Path to the input file or directory')
args = parser.parse_args()
asyncio.run(test(args.input_path))
''' '''
Example record: Output:
{ {
"data": { "data": {
"cert_index": 43061646, "cert_index": 43061646,

View File

@ -82,9 +82,36 @@ async def process_data(file_path: str):
yield {'_id': record['domain'], '_index': default_index, '_source': record} yield {'_id': record['domain'], '_index': default_index, '_source': record}
async def test(input_path: str):
'''
Test the HTTPX ingestion process
:param input_path: Path to the HTTPX log file
'''
async for document in process_data(input_path):
print(document)
if __name__ == '__main__':
import argparse
import asyncio
parser = argparse.ArgumentParser(description='HTTPX Ingestor for ERIS')
parser.add_argument('input_path', help='Path to the input file or directory')
args = parser.parse_args()
asyncio.run(test(args.input_path))
'''' ''''
Example record: Deploy:
go install -v github.com/projectdiscovery/httpx/cmd/httpx@latest
curl -s https://public-dns.info/nameservers.txt -o nameservers.txt
httpx -l zone.txt -t 200 -sc -location -favicon -title -bp -td -ip -cname -mc 200,201,301,302,303,307,308 -fr -r nameservers.txt -retries 2 -stream -sd -j -o httpx.json -v
Output:
{ {
"timestamp":"2024-01-14T13:08:15.117348474-05:00", # Rename to seen and remove milliseconds and offset "timestamp":"2024-01-14T13:08:15.117348474-05:00", # Rename to seen and remove milliseconds and offset
"hash": { # Do we need all of these ? "hash": { # Do we need all of these ?

View File

@ -113,17 +113,48 @@ async def process_data(file_path: str):
yield {'_id': id, '_index': default_index, '_source': struct} yield {'_id': id, '_index': default_index, '_source': struct}
async def test(input_path: str):
'''
Test the Masscan ingestion process
:param input_path: Path to the Masscan log file
'''
async for document in process_data(input_path):
print(document)
if __name__ == '__main__':
import argparse
import asyncio
parser = argparse.ArgumentParser(description='Masscan Ingestor for ERIS')
parser.add_argument('input_path', help='Path to the input file or directory')
args = parser.parse_args()
asyncio.run(test(args.input_path))
''' '''
Example record: Deploy:
apt-get install iptables masscan libpcap-dev screen
setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /bin/masscan
/sbin/iptables -A INPUT -p tcp --dport 61010 -j DROP # Not persistent
printf "0.0.0.0/8\n10.0.0.0/8\n100.64.0.0/10\n127.0.0.0/8\n169.254.0.0/16\n172.16.0.0/12\n192.0.0.0/24\n192.0.2.0/24\n192.31.196.0/24\n192.52.193.0/24\n192.88.99.0/24\n192.168.0.0/16\n192.175.48.0/24\n198.18.0.0/15\n198.51.100.0/24\n203.0.113.0/24\n224.0.0.0/3\n255.255.255.255/32" > exclude.conf
screen -S scan
masscan 0.0.0.0/0 -p21,22,23 --banners --http-user-agent "USER_AGENT" --source-port 61010 --open-only --rate 30000 --excludefile exclude.conf -oJ output.json
masscan 0.0.0.0/0 -p21,22,23 --banners --http-user-agent "USER_AGENT" --source-port 61000-65503 --open-only --rate 30000 --excludefile exclude.conf -oJ output_new.json --shard $i/$TOTAL
Output:
{ {
"ip" : "43.134.51.142", "ip" : "43.134.51.142",
"timestamp" : "1705255468", # Convert to ZULU BABY "timestamp" : "1705255468",
"ports" : [ # We will create a record for each port opened "ports" : [
{ {
"port" : 22, "port" : 22, # We will create a record for each port opened
"proto" : "tcp", "proto" : "tcp",
"service" : { # This field is optional "service" : {
"name" : "ssh", "name" : "ssh",
"banner" : "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4" "banner" : "SSH-2.0-OpenSSH_8.9p1 Ubuntu-3ubuntu0.4"
} }
@ -131,7 +162,7 @@ Example record:
] ]
} }
Will be indexed as: Input:
{ {
"_id" : "43.134.51.142:22" "_id" : "43.134.51.142:22"
"_index" : "masscan-logs", "_index" : "masscan-logs",
@ -144,17 +175,3 @@ Will be indexed as:
"seen" : "2021-10-08T02:04:28Z" "seen" : "2021-10-08T02:04:28Z"
} }
''' '''
'''
Notes:
apt-get install iptables masscan libpcap-dev screen
setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /bin/masscan
/sbin/iptables -A INPUT -p tcp --dport 61010 -j DROP # Not persistent
printf "0.0.0.0/8\n10.0.0.0/8\n100.64.0.0/10\n127.0.0.0/8\n169.254.0.0/16\n172.16.0.0/12\n192.0.0.0/24\n192.0.2.0/24\n192.31.196.0/24\n192.52.193.0/24\n192.88.99.0/24\n192.168.0.0/16\n192.175.48.0/24\n198.18.0.0/15\n198.51.100.0/24\n203.0.113.0/24\n224.0.0.0/3\n255.255.255.255/32" > exclude.conf
screen -S scan
masscan 0.0.0.0/0 -p21,22,23 --banners --http-user-agent "USER_AGENT" --source-port 61010 --open-only --rate 30000 --excludefile exclude.conf -oJ output.json
masscan 0.0.0.0/0 -p21,22,23 --banners --http-user-agent "USER_AGENT" --source-port 61000-65503 --open-only --rate 30000 --excludefile exclude.conf -oJ output_new.json --shard $i/$TOTAL
'''

View File

@ -2,35 +2,6 @@
# Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris) # Elasticsearch Recon Ingestion Scripts (ERIS) - Developed by Acidvegas (https://git.acid.vegas/eris)
# ingest_massdns.py # ingest_massdns.py
'''
Deployment:
git clone https://github.com/blechschmidt/massdns.git $HOME/massdns && cd $HOME/massdns && make
curl -s https://public-dns.info/nameservers.txt | grep -v ':' > $HOME/massdns/nameservers.txt
python ./scripts/ptr.py | ./bin/massdns -r $HOME/massdns/nameservers.txt -t PTR --filter NOERROR -s 1000 -o S -w $HOME/massdns/fifo.json
or...
while true; do python ./scripts/ptr.py | ./bin/massdns -r $HOME/massdns/nameservers.txt -t PTR --filter NOERROR -s 1000 -o S -w $HOME/massdns/fifo.json; done
Output:
0.6.229.47.in-addr.arpa. PTR 047-229-006-000.res.spectrum.com.
0.6.228.75.in-addr.arpa. PTR 0.sub-75-228-6.myvzw.com.
0.6.207.73.in-addr.arpa. PTR c-73-207-6-0.hsd1.ga.comcast.net.
Input:
{
"_id" : "47.229.6.0"
"_index" : "ptr-records",
"_source" : {
"ip" : "47.229.6.0",
"record" : "047-229-006-000.res.spectrum.com", # This will be a list if there are more than one PTR record
"seen" : "2021-06-30T18:31:00Z"
}
}
Notes:
- Why do some IP addresses return a CNAME from a PTR request
- What is dns-servfail.net (Frequent CNAME response from PTR requests)
'''
import logging import logging
import time import time
@ -162,3 +133,34 @@ if __name__ == '__main__':
args = parser.parse_args() args = parser.parse_args()
asyncio.run(test(args.input_path)) asyncio.run(test(args.input_path))
'''
Deployment:
git clone --depth 1 https://github.com/blechschmidt/massdns.git $HOME/massdns && cd $HOME/massdns && make
curl -s https://public-dns.info/nameservers.txt | grep -v ':' > $HOME/massdns/nameservers.txt
python ./scripts/ptr.py | ./bin/massdns -r $HOME/massdns/nameservers.txt -t PTR --filter NOERROR -s 1000 -o S -w $HOME/massdns/fifo.json
or...
while true; do python ./scripts/ptr.py | ./bin/massdns -r $HOME/massdns/nameservers.txt -t PTR --filter NOERROR -s 1000 -o S -w $HOME/massdns/fifo.json; done
Output:
0.6.229.47.in-addr.arpa. PTR 047-229-006-000.res.spectrum.com.
0.6.228.75.in-addr.arpa. PTR 0.sub-75-228-6.myvzw.com.
0.6.207.73.in-addr.arpa. PTR c-73-207-6-0.hsd1.ga.comcast.net.
Input:
{
"_id" : "47.229.6.0"
"_index" : "ptr-records",
"_source" : {
"ip" : "47.229.6.0",
"record" : "047-229-006-000.res.spectrum.com", # This will be a list if there are more than one PTR record
"seen" : "2021-06-30T18:31:00Z"
}
}
Notes:
- Why do some IP addresses return a CNAME from a PTR request
- What is dns-servfail.net (Frequent CNAME response from PTR requests)
'''

View File

@ -119,36 +119,50 @@ async def process_data(file_path: str):
domain_records[domain][record_type].append({'ttl': ttl, 'data': data}) domain_records[domain][record_type].append({'ttl': ttl, 'data': data})
async def test(input_path: str):
'''
Test the Zone file ingestion process
:param input_path: Path to the zone file
'''
async for document in process_data(input_path):
print(document)
if __name__ == '__main__':
import argparse
import asyncio
parser = argparse.ArgumentParser(description='Zone file Ingestor for ERIS')
parser.add_argument('input_path', help='Path to the input file or directory')
args = parser.parse_args()
asyncio.run(test(args.input_path))
''' '''
Example record: Output:
0so9l9nrl425q3tf7dkv1nmv2r3is6vm.vegas. 3600 in nsec3 1 1 100 332539EE7F95C32A 10MHUKG4FHIAVEFDOTF6NKU5KFCB2J3A NS DS RRSIG
0so9l9nrl425q3tf7dkv1nmv2r3is6vm.vegas. 3600 in rrsig NSEC3 8 2 3600 20240122151947 20240101141947 4125 vegas. hzIvQrZIxBSwRWyiHkb5M2W0R3ikNehv884nilkvTt9DaJSDzDUrCtqwQb3jh6+BesByBqfMQK+L2n9c//ZSmD5/iPqxmTPCuYIB9uBV2qSNSNXxCY7uUt5w7hKUS68SLwOSjaQ8GRME9WQJhY6gck0f8TT24enjXXRnQC8QitY=
1-800-flowers.vegas. 3600 in ns dns1.cscdns.net.
1-800-flowers.vegas. 3600 in ns dns2.cscdns.net.
100.vegas. 3600 in ns ns51.domaincontrol.com.
100.vegas. 3600 in ns ns52.domaincontrol.com.
1001.vegas. 3600 in ns ns11.waterrockdigital.com. 1001.vegas. 3600 in ns ns11.waterrockdigital.com.
1001.vegas. 3600 in ns ns12.waterrockdigital.com. 1001.vegas. 3600 in ns ns12.waterrockdigital.com.
Will be indexed as: Input:
{ {
"_id" : "1001.vegas" "_id" : "1001.vegas"
"_index" : "dns-zones", "_index" : "dns-zones",
"_source" : { "_source" : {
"domain" : "1001.vegas", "domain" : "1001.vegas",
"records" : { # All records are stored in a single dictionary "records" : {
"ns": [ "ns": [
{"ttl": 3600, "data": "ns11.waterrockdigital.com"}, {"ttl": 3600, "data": "ns11.waterrockdigital.com"},
{"ttl": 3600, "data": "ns12.waterrockdigital.com"} {"ttl": 3600, "data": "ns12.waterrockdigital.com"}
] ]
}, },
"seen" : "2021-09-01T00:00:00Z" # Zulu time added upon indexing "seen" : "2021-09-01T00:00:00Z"
} }
} }
'''
'''
Notes: Notes:
- How do we want to handle hashed NSEC3 records? Do we ingest them as they are, or crack the NSEC3 hashes first and ingest? How do we want to handle hashed NSEC3 records? Do we ingest them as they are, or crack the NSEC3 hashes first and ingest?
''' '''