Compare commits

...

4 Commits
v1.3.4 ... main

4 changed files with 31 additions and 28 deletions

View File

@ -2,7 +2,9 @@
# ICANN API for the Centralized Zones Data Service - developed by acidvegas (https://git.acid.vegas/czds)
# czds/__init__.py
__version__ = '1.3.4'
from .client import CZDS
__version__ = '1.3.8'
__author__ = 'acidvegas'
__email__ = 'acid.vegas@acid.vegas'
__github__ = 'https://github.com/acidvegas/czds'

View File

@ -6,6 +6,8 @@ import asyncio
import json
import logging
import os
import csv
import io
try:
import aiohttp
@ -138,15 +140,12 @@ class CZDS:
Downloads the zone report stats from the API and scrubs the report for privacy
:param filepath: Filepath to save the scrubbed report
:param scrub: Whether to scrub the username from the report
:param format: Output format ('csv' or 'json')
'''
logging.info('Downloading zone stats report')
# Send the request to the API
async with self.session.get('https://czds-api.icann.org/czds/requests/report', headers=self.headers) as response:
# Check if the request was successful
if response.status != 200:
raise Exception(f'Failed to download the zone stats report: {response.status} {await response.text()}')
@ -157,9 +156,21 @@ class CZDS:
content = content.replace(self.username, 'nobody@no.name')
logging.debug('Scrubbed username from report')
# Convert the report to JSON format if requested (default is CSV)
# Convert the report to JSON format if requested
if format.lower() == 'json':
content = json.dumps(content, indent=4)
# Parse CSV content
csv_reader = csv.DictReader(io.StringIO(content))
# Convert to list of dicts with formatted keys
json_data = []
for row in csv_reader:
formatted_row = {
key.lower().replace(' ', '_'): value
for key, value in row.items()
}
json_data.append(formatted_row)
content = json.dumps(json_data, indent=4)
logging.debug('Converted report to JSON format')
# Save the report to a file if a filepath is provided

View File

@ -2,6 +2,7 @@
# ICANN API for the Centralized Zones Data Service - developed by acidvegas (https://git.acid.vegas/czds)
# czds/utils.py
import asyncio
import gzip
import logging
import os
@ -24,41 +25,30 @@ async def gzip_decompress(filepath: str, cleanup: bool = True):
:param filepath: Path to the gzip file
:param cleanup: Whether to remove the original gzip file after decompression
'''
# Get the original size of the file
original_size = os.path.getsize(filepath)
output_path = filepath[:-3]
logging.debug(f'Decompressing {filepath} ({humanize_bytes(original_size)})...')
# Remove the .gz extension
output_path = filepath[:-3]
# Use a large chunk size (256MB) for maximum throughput
chunk_size = 256 * 1024 * 1024
# Set the chunk size to 25MB
chunk_size = 25 * 1024 * 1024
# Create progress bar for decompression
# Run the actual decompression in a thread pool to prevent blocking
with tqdm(total=original_size, unit='B', unit_scale=True, desc=f'Decompressing {os.path.basename(filepath)}', leave=False) as pbar:
# Decompress the file
with gzip.open(filepath, 'rb') as gz:
async with aiofiles.open(output_path, 'wb') as f_out:
async with aiofiles.open(output_path, 'wb') as f_out:
# Run gzip decompression in thread pool since it's CPU-bound
loop = asyncio.get_event_loop()
with gzip.open(filepath, 'rb') as gz:
while True:
# Read the next chunk
chunk = gz.read(chunk_size)
# If the chunk is empty, break
chunk = await loop.run_in_executor(None, gz.read, chunk_size)
if not chunk:
break
# Write the chunk to the output file
await f_out.write(chunk)
pbar.update(len(chunk))
# Get the decompressed size of the file
decompressed_size = os.path.getsize(output_path)
logging.debug(f'Decompressed {filepath} ({humanize_bytes(decompressed_size)})')
# If the cleanup flag is set, remove the original gzip file
if cleanup:
os.remove(filepath)
logging.debug(f'Removed original gzip file: {filepath}')

View File

@ -11,7 +11,7 @@ with open('README.md', 'r', encoding='utf-8') as fh:
setup(
name='czds-api',
version='1.3.4',
version='1.3.8',
author='acidvegas',
author_email='acid.vegas@acid.vegas',
description='ICANN API for the Centralized Zones Data Service',