"""
Batch Report Generator Utility

This module provides utilities to generate batch reports containing CSV data,
configuration JSON, image grids, and documentation for generated images.
"""

import csv
import json
import logging
import os
import shutil
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)

# Schema every results.csv must satisfy; a 'grid_path' column is appended
# when the CSV is written, and checked again by validate_zip_contents().
REQUIRED_CSV_COLUMNS = [
    'filename',
    'prompt',
    'negative_prompt',
    'model',
    'sampler',
    'steps',
    'cfg_scale',
    'seed',
    'width',
    'height',
    'timestamp',
]


class BatchReportGenerator:
    """
    Generate batch reports containing CSV, JSON, grids, and README files.

    The generator creates a deterministic ZIP file structure with all necessary
    metadata and image files for batch analysis and archival: identical input
    data always yields identical archive member names and CSV rows.
    """

    def __init__(self, output_dir: Optional[str] = None):
        """
        Initialize the BatchReportGenerator.

        Args:
            output_dir: Directory to save the report. Defaults to
                ``<backend>/served_images/reports`` relative to this package.
        """
        if output_dir is None:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            backend_dir = os.path.dirname(current_dir)
            self.output_dir = os.path.join(backend_dir, 'served_images', 'reports')
        else:
            self.output_dir = output_dir

        os.makedirs(self.output_dir, exist_ok=True)
        # Lazy %-style args so formatting only happens when the record is emitted.
        logger.info("BatchReportGenerator initialized with output directory: %s",
                    self.output_dir)

    @staticmethod
    def _grid_filename(idx: int, original_filename: str) -> str:
        """Deterministic archive name for the idx-th image.

        Single source of truth for the naming scheme shared by _create_csv
        (which records the path) and _copy_images (which creates the file).
        """
        return f"grid_{idx:04d}_{Path(original_filename).stem}.png"

    def generate_report(self,
                        images_data: List[Dict[str, Any]],
                        config: Dict[str, Any],
                        report_name: Optional[str] = None) -> str:
        """
        Generate a complete batch report bundle.

        Args:
            images_data: List of image data dictionaries containing metadata
            config: Configuration dictionary for the generation session
            report_name: Optional custom name for the report

        Returns:
            Path to the generated ``<report_name>.zip`` file

        Raises:
            OSError: If the report directory or files cannot be written.
        """
        try:
            if report_name is None:
                # Timestamped default keeps names unique (to the second) and sortable.
                timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                report_name = f"report_{timestamp}"

            logger.info("Generating batch report: %s", report_name)

            # Stage everything in a scratch directory, then zip it up.
            temp_dir = os.path.join(self.output_dir, f"{report_name}_temp")
            os.makedirs(temp_dir, exist_ok=True)

            try:
                csv_path = os.path.join(temp_dir, 'results.csv')
                self._create_csv(csv_path, images_data)
                logger.info("Created CSV file with %d entries", len(images_data))

                config_path = os.path.join(temp_dir, 'config.json')
                self._create_config_json(config_path, config)
                logger.info("Created config.json")

                grids_dir = os.path.join(temp_dir, 'grids')
                os.makedirs(grids_dir, exist_ok=True)
                copied_count = self._copy_images(images_data, grids_dir)
                logger.info("Copied %d images to grids directory", copied_count)

                readme_path = os.path.join(temp_dir, 'README.txt')
                self._create_readme(readme_path, images_data, config)
                logger.info("Created README.txt")

                zip_path = os.path.join(self.output_dir, f"{report_name}.zip")
                self._create_zip(temp_dir, zip_path)
                logger.info("Created ZIP file: %s", zip_path)

                return zip_path

            finally:
                # Always remove the staging directory, even on failure.
                if os.path.exists(temp_dir):
                    shutil.rmtree(temp_dir)
                    logger.info("Cleaned up temporary directory")

        except Exception as e:
            logger.error("Error generating batch report: %s", e)
            raise

    def _create_csv(self, csv_path: str, images_data: List[Dict[str, Any]]) -> None:
        """
        Create results.csv with image metadata.

        Args:
            csv_path: Path where the CSV file will be created
            images_data: List of image data dictionaries
        """
        try:
            # Compute the fallback timestamp ONCE so rows missing a timestamp
            # all agree — a per-row datetime.now() would make the CSV
            # non-deterministic within a single report.
            fallback_timestamp = datetime.now().isoformat()

            with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
                fieldnames = REQUIRED_CSV_COLUMNS + ['grid_path']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()

                for idx, image_data in enumerate(images_data):
                    # Generation parameters live under a nested 'settings' dict.
                    settings = image_data.get('settings', {})

                    original_filename = image_data.get('filename', f'image_{idx}.png')
                    grid_filename = self._grid_filename(idx, original_filename)

                    writer.writerow({
                        'filename': original_filename,
                        'prompt': image_data.get('prompt', ''),
                        'negative_prompt': image_data.get('negativePrompt', ''),
                        'model': settings.get('model', 'unknown'),
                        'sampler': settings.get('sampler', 'unknown'),
                        'steps': settings.get('steps', 20),
                        'cfg_scale': settings.get('cfg_scale', 7.0),
                        'seed': settings.get('seed', -1),
                        'width': settings.get('width', 512),
                        'height': settings.get('height', 512),
                        'timestamp': image_data.get('timestamp', fallback_timestamp),
                        'grid_path': f"grids/{grid_filename}",
                    })

        except Exception as e:
            logger.error("Error creating CSV file: %s", e)
            raise

    def _create_config_json(self, config_path: str, config: Dict[str, Any]) -> None:
        """
        Create config.json with generation configuration.

        Args:
            config_path: Path where the JSON file will be created
            config: Configuration dictionary
        """
        try:
            # Wrap the raw config so the archive is self-describing.
            config_with_metadata = {
                'generation_config': config,
                'report_metadata': {
                    'created_at': datetime.now().isoformat(),
                    'version': '1.0',
                    'generator': 'DreamLayer Batch Report Generator'
                }
            }

            with open(config_path, 'w', encoding='utf-8') as f:
                json.dump(config_with_metadata, f, indent=2, ensure_ascii=False)

        except Exception as e:
            logger.error("Error creating config.json: %s", e)
            raise

    def _copy_images(self, images_data: List[Dict[str, Any]], grids_dir: str) -> int:
        """
        Copy image files to the grids directory with deterministic names.

        Missing source files are logged and skipped rather than raising, so a
        partially-available gallery still produces a report.

        Args:
            images_data: List of image data dictionaries
            grids_dir: Destination directory for images

        Returns:
            Number of successfully copied images
        """
        # Source images are served from <backend>/served_images.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        backend_dir = os.path.dirname(current_dir)
        served_images_dir = os.path.join(backend_dir, 'served_images')

        copied_count = 0

        for idx, image_data in enumerate(images_data):
            try:
                original_filename = image_data.get('filename')
                if not original_filename:
                    logger.warning("Image %d has no filename, skipping", idx)
                    continue

                src_path = os.path.join(served_images_dir, original_filename)
                # Same naming scheme as the grid_path column written by _create_csv.
                dest_path = os.path.join(
                    grids_dir, self._grid_filename(idx, original_filename))

                if os.path.exists(src_path):
                    shutil.copy2(src_path, dest_path)
                    copied_count += 1
                else:
                    logger.warning("Image file not found: %s", src_path)

            except Exception as e:
                # Best-effort copy: one bad entry must not abort the report.
                logger.error("Error copying image %d: %s", idx, e)

        return copied_count

    def _create_readme(self, readme_path: str, images_data: List[Dict[str, Any]],
                       config: Dict[str, Any]) -> None:
        """
        Create README.txt with report information.

        Args:
            readme_path: Path where the README file will be created
            images_data: List of image data dictionaries
            config: Configuration dictionary (currently unused in the text,
                kept for signature stability)
        """
        try:
            with open(readme_path, 'w', encoding='utf-8') as f:
                f.write("DreamLayer Batch Report\n")
                f.write("=" * 50 + "\n\n")

                f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"Total Images: {len(images_data)}\n\n")

                f.write("Contents:\n")
                f.write("---------\n")
                f.write("- results.csv: Detailed metadata for all generated images\n")
                f.write("- config.json: Complete generation configuration\n")
                f.write("- grids/: Directory containing all generated images\n\n")

                f.write("CSV Schema:\n")
                f.write("-----------\n")
                for column in REQUIRED_CSV_COLUMNS:
                    f.write(f"- {column}\n")
                f.write("- grid_path: Path to image file within this archive\n\n")

                f.write("Usage:\n")
                f.write("------\n")
                f.write("1. Extract this ZIP file to access all contents\n")
                f.write("2. Use results.csv for batch analysis or import\n")
                f.write("3. Reference grid_path column to locate specific images\n")
                f.write("4. config.json contains full generation parameters\n")

        except Exception as e:
            logger.error("Error creating README: %s", e)
            raise

    def _create_zip(self, source_dir: str, zip_path: str) -> None:
        """
        Create ZIP file from the temporary directory.

        Args:
            source_dir: Directory containing files to zip
            zip_path: Path where the ZIP file will be created
        """
        try:
            with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                for root, dirs, files in os.walk(source_dir):
                    for file in files:
                        file_path = os.path.join(root, file)
                        # Store paths relative to source_dir so the archive
                        # mirrors the staged directory layout.
                        arcname = os.path.relpath(file_path, source_dir)
                        zipf.write(file_path, arcname)

        except Exception as e:
            logger.error("Error creating ZIP file: %s", e)
            raise

    def validate_csv_schema(self, csv_path: str) -> bool:
        """
        Validate that a CSV file contains all required columns.

        Args:
            csv_path: Path to the CSV file to validate

        Returns:
            True if all required columns are present; False on missing
            columns or any read error (errors are logged, not raised).
        """
        try:
            with open(csv_path, 'r', encoding='utf-8') as csvfile:
                reader = csv.DictReader(csvfile)
                headers = reader.fieldnames or []

            missing_columns = set(REQUIRED_CSV_COLUMNS) - set(headers)
            if missing_columns:
                logger.warning("Missing required columns: %s", missing_columns)
                return False

            return True

        except Exception as e:
            logger.error("Error validating CSV schema: %s", e)
            return False

    def validate_zip_contents(self, zip_path: str) -> bool:
        """
        Validate that all paths in the CSV resolve to files in the ZIP.

        Args:
            zip_path: Path to the ZIP file to validate

        Returns:
            True if every grid_path referenced by results.csv exists in the
            archive; False on missing members or any read error.
        """
        try:
            with zipfile.ZipFile(zip_path, 'r') as zipf:
                zip_files = set(zipf.namelist())

                # results.csv lives inside the archive; parse it in memory.
                csv_content = zipf.read('results.csv').decode('utf-8')
                reader = csv.DictReader(StringIO(csv_content))

                missing_files = [
                    row['grid_path']
                    for row in reader
                    if row.get('grid_path', '') and row['grid_path'] not in zip_files
                ]

            if missing_files:
                logger.warning("Missing files in ZIP: %s", missing_files)
                return False

            return True

        except Exception as e:
            logger.error("Error validating ZIP contents: %s", e)
            return False
@app.route('/api/batch-report/generate', methods=['POST', 'OPTIONS'])
def generate_batch_report():
    """Generate a batch report ZIP file containing CSV, JSON, images, and README.

    Expects a JSON body with 'images' (list of image metadata dicts), optional
    'config' (generation settings) and optional 'report_name'. Returns the
    generated ZIP as a file download, or a JSON error payload.
    """
    if request.method == 'OPTIONS':
        # Handle CORS preflight from the frontend dev server.
        response = jsonify({'status': 'ok'})
        response.headers.add('Access-Control-Allow-Origin', 'http://localhost:8080')
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
        response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS')
        return response

    try:
        data = request.json
        print("📊 Batch report generation request received")

        if not data:
            return jsonify({
                'status': 'error',
                'message': 'No data provided'
            }), 400

        images_data = data.get('images', [])
        config = data.get('config', {})
        report_name = data.get('report_name', None)

        if not images_data:
            return jsonify({
                'status': 'error',
                'message': 'No images data provided'
            }), 400

        print(f"📸 Processing {len(images_data)} images for batch report")

        generator = BatchReportGenerator()
        zip_path = generator.generate_report(
            images_data=images_data,
            config=config,
            report_name=report_name
        )

        if not os.path.exists(zip_path):
            return jsonify({
                'status': 'error',
                'message': 'Failed to generate report ZIP file'
            }), 500

        # Validate the CSV schema against the copy INSIDE the ZIP.
        # (Bug fix: the previous code looked for results.csv next to the ZIP,
        # but the staging directory is deleted after packaging, so that path
        # never exists and the warning fired on every request.)
        import zipfile
        try:
            with tempfile.TemporaryDirectory() as tmp_dir:
                with zipfile.ZipFile(zip_path) as zf:
                    extracted_csv = zf.extract('results.csv', tmp_dir)
                if not generator.validate_csv_schema(extracted_csv):
                    print("⚠️ Warning: CSV schema validation failed, but continuing...")
        except Exception as validation_error:
            print(f"⚠️ Warning: could not validate CSV schema ({validation_error}), continuing...")

        # Cross-check that every grid_path in the CSV resolves inside the ZIP.
        if not generator.validate_zip_contents(zip_path):
            print("⚠️ Warning: ZIP contents validation failed, but continuing...")

        file_size = os.path.getsize(zip_path)
        file_name = os.path.basename(zip_path)
        print(f"✅ Batch report generated successfully: {file_name} ({file_size} bytes)")

        # Stream the archive back as a download.
        return send_file(
            zip_path,
            mimetype='application/zip',
            as_attachment=True,
            download_name=file_name
        )

    except Exception as e:
        print(f"❌ Error generating batch report: {str(e)}")
        import traceback
        traceback.print_exc()
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
"""
Test batch report generator functionality

Tests the BatchReportGenerator class including CSV schema validation,
ZIP file creation, and deterministic file naming.
"""

import csv
import json
import os
import shutil
import tempfile
import zipfile
from pathlib import Path

import pytest

from dream_layer_backend_utils import BatchReportGenerator
from dream_layer_backend_utils.batch_report_generator import REQUIRED_CSV_COLUMNS

# A minimal 1x1 transparent PNG used as the fake served-image payload.
_TINY_PNG = (
    b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01'
    b'\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\rIDATx\x9cc\xf8\x0f'
    b'\x00\x00\x01\x01\x00\x05W\xcd\xc1\x0b\x00\x00\x00\x00IEND\xaeB`\x82'
)


@pytest.fixture
def temp_output_dir():
    """Yield a throwaway directory for report outputs, removed afterwards."""
    workdir = tempfile.mkdtemp()
    yield workdir
    shutil.rmtree(workdir)


@pytest.fixture
def mock_served_images_dir(monkeypatch):
    """Point the generator at a temp directory pre-seeded with test images."""
    images_root = tempfile.mkdtemp()

    # Seed two dummy PNG files the generator can "serve".
    for i in range(1, 3):
        with open(os.path.join(images_root, f'test_image_{i:03d}.png'), 'wb') as fh:
            fh.write(_TINY_PNG)

    def patched_init(self, output_dir=None):
        # Same contract as the real __init__, but rooted in the temp tree.
        if output_dir is None:
            self.output_dir = os.path.join(images_root, 'reports')
        else:
            self.output_dir = output_dir
        os.makedirs(self.output_dir, exist_ok=True)
        self._served_images_dir = images_root

    def patched_copy_images(self, images_data, grids_dir):
        # Mirrors _copy_images but reads from the seeded temp directory.
        copied = 0
        for idx, entry in enumerate(images_data):
            try:
                name = entry.get('filename')
                if not name:
                    continue
                source = os.path.join(self._served_images_dir, name)
                if os.path.exists(source):
                    target = os.path.join(
                        grids_dir, f"grid_{idx:04d}_{Path(name).stem}.png")
                    shutil.copy2(source, target)
                    copied += 1
            except Exception:
                pass
        return copied

    monkeypatch.setattr(BatchReportGenerator, '__init__', patched_init)
    monkeypatch.setattr(BatchReportGenerator, '_copy_images', patched_copy_images)

    yield images_root
    shutil.rmtree(images_root)


@pytest.fixture
def sample_images_data():
    """Two representative gallery entries with nested generation settings."""
    return [
        {
            'id': 'img_001',
            'filename': 'test_image_001.png',
            'url': 'http://localhost:5001/api/images/test_image_001.png',
            'prompt': 'A beautiful landscape',
            'negativePrompt': 'ugly, blurry',
            'timestamp': 1704067200000,  # 2024-01-01
            'settings': {
                'model': 'sd-v1-5.safetensors',
                'sampler': 'DPM++ 2M Karras',
                'steps': 30,
                'cfg_scale': 7.5,
                'seed': 12345,
                'width': 1024,
                'height': 768,
            },
        },
        {
            'id': 'img_002',
            'filename': 'test_image_002.png',
            'url': 'http://localhost:5001/api/images/test_image_002.png',
            'prompt': 'A futuristic city',
            'negativePrompt': 'low quality',
            'timestamp': 1704067800000,
            'settings': {
                'model': 'sdxl-base-1.0.safetensors',
                'sampler': 'Euler a',
                'steps': 25,
                'cfg_scale': 8.0,
                'seed': 54321,
                'width': 1024,
                'height': 1024,
            },
        },
    ]


@pytest.fixture
def sample_config():
    """A representative generation-session configuration."""
    return {
        'session_id': 'test_session_123',
        'generation_date': '2024-01-01T00:00:00Z',
        'total_images': 2,
        'user_settings': {
            'theme': 'dark',
            'auto_save': True,
        },
    }


class TestBatchReportGenerator:
    """Unit tests for individual BatchReportGenerator behaviors."""

    def test_initialization(self, temp_output_dir):
        """An explicit output directory is honored and created."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)
        assert generator.output_dir == temp_output_dir
        assert os.path.exists(temp_output_dir)

    def test_csv_schema_validation(self, temp_output_dir):
        """Schema validation accepts complete headers and rejects partial ones."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        good_csv = os.path.join(temp_output_dir, 'test_valid.csv')
        with open(good_csv, 'w', newline='', encoding='utf-8') as fh:
            writer = csv.DictWriter(fh, fieldnames=REQUIRED_CSV_COLUMNS + ['grid_path'])
            writer.writeheader()
            writer.writerow({
                'filename': 'test.png',
                'prompt': 'test prompt',
                'negative_prompt': 'test negative',
                'model': 'test_model',
                'sampler': 'test_sampler',
                'steps': 20,
                'cfg_scale': 7.0,
                'seed': 12345,
                'width': 512,
                'height': 512,
                'timestamp': '2024-01-01T00:00:00Z',
                'grid_path': 'grids/grid_0000_test.png',
            })
        assert generator.validate_csv_schema(good_csv) is True

        # A header missing most required columns must fail validation.
        bad_csv = os.path.join(temp_output_dir, 'test_invalid.csv')
        with open(bad_csv, 'w', newline='', encoding='utf-8') as fh:
            csv.DictWriter(fh, fieldnames=['filename', 'prompt']).writeheader()
        assert generator.validate_csv_schema(bad_csv) is False

    def test_deterministic_file_naming(self, temp_output_dir, sample_images_data):
        """grid_path values follow the grid_{idx:04d}_{stem}.png scheme."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        csv_path = os.path.join(temp_output_dir, 'test_naming.csv')
        generator._create_csv(csv_path, sample_images_data)

        with open(csv_path, 'r', encoding='utf-8') as fh:
            rows = list(csv.DictReader(fh))

        assert rows[0]['grid_path'] == 'grids/grid_0000_test_image_001.png'
        assert rows[1]['grid_path'] == 'grids/grid_0001_test_image_002.png'

    def test_generate_report_creates_zip(self, temp_output_dir, sample_images_data, sample_config):
        """generate_report produces a ZIP with CSV, config, and README inside."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        zip_path = generator.generate_report(
            images_data=sample_images_data,
            config=sample_config,
            report_name='test_report',
        )

        assert os.path.exists(zip_path)
        assert zip_path.endswith('test_report.zip')

        with zipfile.ZipFile(zip_path, 'r') as zipf:
            namelist = zipf.namelist()
            for required in ('results.csv', 'config.json', 'README.txt'):
                assert required in namelist

            csv_content = zipf.read('results.csv').decode('utf-8')
            assert 'test_image_001.png' in csv_content
            assert 'A beautiful landscape' in csv_content

            config_content = json.loads(zipf.read('config.json').decode('utf-8'))
            assert 'generation_config' in config_content
            assert 'report_metadata' in config_content
            assert config_content['generation_config']['session_id'] == 'test_session_123'

            readme_content = zipf.read('README.txt').decode('utf-8')
            assert 'DreamLayer Batch Report' in readme_content
            assert 'CSV Schema:' in readme_content

    def test_validate_zip_contents(self, temp_output_dir, sample_images_data, sample_config):
        """ZIP validation checks grid_path rows against the archive listing."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        header = "filename,prompt,negative_prompt,model,sampler,steps,cfg_scale,seed,width,height,timestamp,grid_path\n"

        # A ZIP whose CSV references a member that really exists -> valid.
        ok_zip = os.path.join(temp_output_dir, 'test_validation.zip')
        with zipfile.ZipFile(ok_zip, 'w') as zipf:
            body = header + "test.png,prompt,negative,model,sampler,20,7.0,123,512,512,2024-01-01,grids/grid_0000_test.png\n"
            zipf.writestr('results.csv', body)
            zipf.writestr('grids/grid_0000_test.png', b'fake image data')
        assert generator.validate_zip_contents(ok_zip) is True

        # A ZIP whose CSV references an absent member -> invalid.
        bad_zip = os.path.join(temp_output_dir, 'test_invalid.zip')
        with zipfile.ZipFile(bad_zip, 'w') as zipf:
            body = header + "test.png,prompt,negative,model,sampler,20,7.0,123,512,512,2024-01-01,grids/missing_file.png\n"
            zipf.writestr('results.csv', body)
        assert generator.validate_zip_contents(bad_zip) is False

    def test_report_with_empty_images(self, temp_output_dir, sample_config):
        """An empty image list still produces a ZIP with a header-only CSV."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        zip_path = generator.generate_report(
            images_data=[],
            config=sample_config,
            report_name='empty_report',
        )

        assert os.path.exists(zip_path)
        with zipfile.ZipFile(zip_path, 'r') as zipf:
            csv_lines = zipf.read('results.csv').decode('utf-8').strip().split('\n')
        assert len(csv_lines) == 1  # header only

    def test_report_name_generation(self, temp_output_dir, sample_images_data, sample_config):
        """Omitting report_name yields report_YYYYMMDD_HHMMSS.zip."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        zip_path = generator.generate_report(
            images_data=sample_images_data,
            config=sample_config,
        )

        filename = os.path.basename(zip_path)
        assert filename.startswith('report_')
        assert filename.endswith('.zip')
        stamp = filename[7:-4]  # strip 'report_' prefix and '.zip' suffix
        assert len(stamp) == 15  # YYYYMMDD_HHMMSS
        assert stamp[8] == '_'


class TestBatchReportIntegration:
    """Integration tests exercising the full report pipeline."""

    def test_full_report_generation_workflow(self, temp_output_dir, sample_images_data,
                                             sample_config, mock_served_images_dir):
        """Data in, validated ZIP out, with every component present."""
        generator = BatchReportGenerator(output_dir=temp_output_dir)

        zip_path = generator.generate_report(
            images_data=sample_images_data,
            config=sample_config,
            report_name='integration_test',
        )

        assert os.path.exists(zip_path)
        assert generator.validate_zip_contents(zip_path) is True

        extract_dir = os.path.join(temp_output_dir, 'extract')
        with zipfile.ZipFile(zip_path, 'r') as zipf:
            zipf.extractall(extract_dir)

        assert generator.validate_csv_schema(os.path.join(extract_dir, 'results.csv')) is True
        assert os.path.exists(os.path.join(extract_dir, 'config.json'))
        assert os.path.exists(os.path.join(extract_dir, 'README.txt'))
        assert os.path.isdir(os.path.join(extract_dir, 'grids'))

    @pytest.mark.parametrize("num_images", [1, 10, 100])
    def test_scalability(self, temp_output_dir, sample_config, num_images):
        """The CSV row count tracks the input size at several scales."""
        images_data = [
            {
                'id': f'img_{i:04d}',
                'filename': f'test_image_{i:04d}.png',
                'url': f'http://localhost:5001/api/images/test_image_{i:04d}.png',
                'prompt': f'Test prompt {i}',
                'negativePrompt': 'negative',
                'timestamp': 1704067200000 + i * 1000,
                'settings': {
                    'model': 'test_model.safetensors',
                    'sampler': 'Euler',
                    'steps': 20,
                    'cfg_scale': 7.0,
                    'seed': 12345 + i,
                    'width': 512,
                    'height': 512,
                },
            }
            for i in range(num_images)
        ]

        generator = BatchReportGenerator(output_dir=temp_output_dir)
        zip_path = generator.generate_report(
            images_data=images_data,
            config=sample_config,
            report_name=f'scale_test_{num_images}',
        )

        assert os.path.exists(zip_path)
        with zipfile.ZipFile(zip_path, 'r') as zipf:
            csv_lines = zipf.read('results.csv').decode('utf-8').strip().split('\n')
        assert len(csv_lines) == num_images + 1  # +1 for header
setIncludeConfig] = useState(true); + const [includeReadme, setIncludeReadme] = useState(true); + const [selectedImages, setSelectedImages] = useState>(new Set()); + + // Update selected images when images prop changes + React.useEffect(() => { + // Auto-select all images when they become available + setSelectedImages(new Set(images.map(img => img.id))); + }, [images]); + + const handleSelectAll = () => { + if (selectedImages.size === images.length) { + setSelectedImages(new Set()); + } else { + setSelectedImages(new Set(images.map(img => img.id))); + } + }; + + const handleGenerateReport = async () => { + if (selectedImages.size === 0) { + toast({ + title: "No images selected", + description: "Please select at least one image to include in the report.", + variant: "destructive", + }); + return; + } + + setIsGenerating(true); + + try { + // Filter selected images + const selectedImageData = images.filter(img => selectedImages.has(img.id)); + + // Prepare config + const reportConfig: GenerationConfig = { + ...config, + generation_date: new Date().toISOString(), + total_images: selectedImageData.length, + include_config: includeConfig, + include_readme: includeReadme, + }; + + // Generate the report + const blob = await generateBatchReport(selectedImageData, reportConfig, reportName); + + // Download the report + const filename = reportName ? `${reportName}.zip` : undefined; + downloadBatchReport(blob, filename); + + toast({ + title: "Report generated successfully", + description: `${selectedImageData.length} images included in the report.`, + }); + } catch (error) { + console.error('Error generating report:', error); + toast({ + title: "Error generating report", + description: error instanceof Error ? 
error.message : "An unknown error occurred", + variant: "destructive", + }); + } finally { + setIsGenerating(false); + } + }; + + return ( + + + + + Batch Report Generator + + + Generate a comprehensive report bundle containing selected images, metadata, and configuration. + + + +
+ + setReportName(e.target.value)} + disabled={isGenerating} + /> +

+ Leave empty for automatic timestamp-based naming +

+
+ +
+
+ setIncludeConfig(checked as boolean)} + disabled={isGenerating} + /> + +
+ +
+ setIncludeReadme(checked as boolean)} + disabled={isGenerating} + /> + +
+
+ +
+
+ + +
+ +
+

The report will include:

+
    +
  • results.csv - Metadata for all selected images
  • +
  • grids/ - Directory with all selected images
  • + {includeConfig &&
  • config.json - Complete generation configuration
  • } + {includeReadme &&
  • README.txt - Usage instructions and schema information
  • } +
+
+
+ + +
+
+ ); +}; + +export default BatchReportGenerator; \ No newline at end of file diff --git a/dream_layer_frontend/src/features/Extras/ExtrasPage.tsx b/dream_layer_frontend/src/features/Extras/ExtrasPage.tsx index 79c78be3..38ac1b3a 100644 --- a/dream_layer_frontend/src/features/Extras/ExtrasPage.tsx +++ b/dream_layer_frontend/src/features/Extras/ExtrasPage.tsx @@ -3,6 +3,7 @@ import Accordion from '@/components/Accordion'; import Slider from '@/components/Slider'; import SubTabNavigation from '@/components/SubTabNavigation'; import SizingSettings from '@/components/SizingSettings'; +import BatchReportGenerator from '@/components/batch-report/BatchReportGenerator'; import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; import { Select, @@ -20,6 +21,9 @@ import { toast } from 'sonner'; import ImageUploadButton from '@/components/ImageUploadButton'; import { fetchUpscalerModels } from "@/services/modelService"; import { useModelRefresh } from "@/hooks/useModelRefresh"; +import { useTxt2ImgGalleryStore } from '@/stores/useTxt2ImgGalleryStore'; +import { useImg2ImgGalleryStore } from '@/stores/useImg2ImgGalleryStore'; +import { ImageReportData } from '@/services/reportService'; const ExtrasPage = () => { const [activeSubTab, setActiveSubTab] = useState("upscale"); @@ -28,6 +32,8 @@ const ExtrasPage = () => { const [isProcessing, setIsProcessing] = useState(false); const [processedImage, setProcessedImage] = useState(null); const [availableUpscalers, setAvailableUpscalers] = useState([]); + const txt2imgImages = useTxt2ImgGalleryStore((state) => state.images); + const img2imgImages = useImg2ImgGalleryStore((state) => state.images); // New state for advanced upscaling options const [upscaleMethod, setUpscaleMethod] = useState("upscale-by"); @@ -45,6 +51,8 @@ const ExtrasPage = () => { const subtabs = [ { id: "upscale", label: "Single Image", active: activeSubTab === "upscale" }, + { id: "batch", label: "Batch Processing", active: activeSubTab === "batch" 
}, + { id: "report", label: "Batch Report", active: activeSubTab === "report" }, ]; const handleSubTabChange = (tabId: string) => { @@ -376,6 +384,61 @@ const ExtrasPage = () => { {renderUpscalingOptions()} ); + + case "report": + // Batch Report tab - generate reports from gallery images + const galleryImages: ImageReportData[] = [...txt2imgImages, ...img2imgImages].map((img, index) => { + // Generate proper filename + let filename: string; + if (img.url.startsWith('data:')) { + // For data URLs, create a filename based on the ID + filename = `${img.id}.png`; + } else { + // For regular URLs, extract the filename + filename = img.url.split('/').pop() || `image_${index}.png`; + } + + return { + id: img.id, + filename: filename, + url: img.url, + prompt: img.prompt, + negativePrompt: img.negativePrompt, + timestamp: img.timestamp, + settings: { + model: img.settings?.model || 'unknown', + sampler: img.settings?.sampler || 'unknown', + steps: img.settings?.steps || 20, + cfg_scale: img.settings?.cfg_scale || 7.0, + seed: img.settings?.seed || -1, + width: img.settings?.width || 512, + height: img.settings?.height || 512, + ...img.settings + } + }; + }); + + const allImages = galleryImages; + + return ( +
+ + + {/* Data source indicator */} + {allImages.length > 0 && ( +
+ Images available: {txt2imgImages.length} from txt2img, {img2imgImages.length} from img2img +
+ )} +
+ ); + default: // "upscale" tab (default) - Single Image return renderUpscalingOptions(); @@ -406,9 +469,14 @@ const ExtrasPage = () => { onTabChange={handleSubTabChange} /> - + {renderSubTabContent()} diff --git a/dream_layer_frontend/src/services/reportService.ts b/dream_layer_frontend/src/services/reportService.ts new file mode 100644 index 00000000..81104bd7 --- /dev/null +++ b/dream_layer_frontend/src/services/reportService.ts @@ -0,0 +1,91 @@ +export interface BatchReportRequest { + images: ImageReportData[]; + config: GenerationConfig; + report_name?: string; +} + +export interface ImageReportData { + id: string; + filename: string; + url: string; + prompt: string; + negativePrompt?: string; + timestamp: number; + settings: GenerationSettings; +} + +export interface GenerationSettings { + model: string; + sampler: string; + steps: number; + cfg_scale: number; + seed: number; + width: number; + height: number; + [key: string]: any; +} + +export interface GenerationConfig { + session_id?: string; + generation_date?: string; + total_images?: number; + [key: string]: any; +} + +export interface BatchReportResponse { + status: string; + message?: string; + download_url?: string; +} + +const EXTRAS_API_BASE_URL = import.meta.env.VITE_EXTRAS_API_BASE_URL || 'http://localhost:5003'; + +export const generateBatchReport = async ( + images: ImageReportData[], + config: GenerationConfig, + reportName?: string +): Promise => { + try { + console.log('🔄 Generating batch report with', images.length, 'images'); + + const requestData: BatchReportRequest = { + images, + config, + report_name: reportName + }; + + const response = await fetch(`${EXTRAS_API_BASE_URL}/api/batch-report/generate`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestData), + }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({ message: 'Unknown error' })); + throw new Error(errorData.message || `Failed to generate 
report: ${response.statusText}`); + } + + // The response should be a blob (ZIP file) + const blob = await response.blob(); + console.log('✅ Batch report generated successfully, size:', blob.size, 'bytes'); + + return blob; + } catch (error) { + console.error('❌ Error generating batch report:', error); + throw error; + } +}; + +export const downloadBatchReport = (blob: Blob, filename?: string) => { + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = filename || `report_${new Date().toISOString().split('T')[0]}.zip`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + window.URL.revokeObjectURL(url); + console.log('📥 Batch report downloaded'); +}; \ No newline at end of file