diff --git a/ComfyUI/user/default/comfy.settings.json b/ComfyUI/user/default/comfy.settings.json index 438b3dad..512a22bd 100644 --- a/ComfyUI/user/default/comfy.settings.json +++ b/ComfyUI/user/default/comfy.settings.json @@ -1,3 +1,6 @@ { - "Comfy.TutorialCompleted": true + "Comfy.TutorialCompleted": true, + "Comfy.Queue.ImageFit": "cover", + "Comfy.Release.Version": "0.3.48", + "Comfy.Release.Timestamp": 1754374644423 } \ No newline at end of file diff --git a/dream_layer_backend/img2img_server.py b/dream_layer_backend/img2img_server.py index e4405a4b..73f7c025 100644 --- a/dream_layer_backend/img2img_server.py +++ b/dream_layer_backend/img2img_server.py @@ -4,6 +4,7 @@ import json import logging import os +import requests from PIL import Image import io import time @@ -11,6 +12,8 @@ from img2img_workflow import transform_to_img2img_workflow from shared_utils import COMFY_API_URL from dream_layer_backend_utils.fetch_advanced_models import get_controlnet_models +from run_registry import create_run_config_from_generation_data +from dataclasses import asdict # Configure logging logging.basicConfig( @@ -178,11 +181,40 @@ def handle_img2img(): logger.info(f" Subfolder: {img.get('subfolder', 'None')}") logger.info(f" URL: {img.get('url')}") + # Extract generated image filenames + generated_images = [] + if comfy_response.get("generated_images"): + for img_data in comfy_response["generated_images"]: + if isinstance(img_data, dict) and "filename" in img_data: + generated_images.append(img_data["filename"]) + + # Register the completed run + try: + run_config = create_run_config_from_generation_data( + data, generated_images, "img2img" + ) + + # Send to run registry + registry_response = requests.post( + "http://localhost:5005/api/runs", + json=asdict(run_config), + timeout=5 + ) + + if registry_response.status_code == 200: + logger.info(f"✅ Run registered successfully: {run_config.run_id}") + else: + logger.warning(f"⚠️ Failed to register run: {registry_response.text}") 
+ + except Exception as e: + logger.warning(f"⚠️ Error registering run: {str(e)}") + response = jsonify({ "status": "success", "message": "Workflow sent to ComfyUI successfully", "comfy_response": comfy_response, - "workflow": workflow + "workflow": workflow, + "run_id": run_config.run_id if 'run_config' in locals() else None }) # Clean up the temporary image file diff --git a/dream_layer_backend/report_bundle.py b/dream_layer_backend/report_bundle.py new file mode 100644 index 00000000..0faf0107 --- /dev/null +++ b/dream_layer_backend/report_bundle.py @@ -0,0 +1,367 @@ +import os +import csv +import json +import zipfile +import shutil +from datetime import datetime +from typing import List, Dict, Any, Optional +from dataclasses import asdict +from flask import Flask, jsonify, request, send_file +from flask_cors import CORS +import requests +from run_registry import RunRegistry, RunConfig + +class ReportBundleGenerator: + """Generates report bundles with CSV, config, images, and README""" + + def __init__(self, output_dir: str = "Dream_Layer_Resources/output"): + self.output_dir = output_dir + self.registry = RunRegistry() + + def generate_csv(self, runs: List[RunConfig]) -> str: + """Generate results.csv with required columns""" + csv_path = "temp_results.csv" + + # Define required CSV columns based on schema + required_columns = [ + 'run_id', + 'timestamp', + 'model', + 'vae', + 'prompt', + 'negative_prompt', + 'seed', + 'sampler', + 'steps', + 'cfg_scale', + 'width', + 'height', + 'batch_size', + 'batch_count', + 'generation_type', + 'image_paths', + 'loras', + 'controlnets', + 'workflow_hash' + ] + + with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile: + writer = csv.DictWriter(csvfile, fieldnames=required_columns) + writer.writeheader() + + for run in runs: + # Prepare loras and controlnets as JSON strings + loras_json = json.dumps(run.loras) if run.loras else "[]" + controlnets_json = json.dumps(run.controlnets) if run.controlnets else "[]" + + 
# Create workflow hash for identification + workflow_hash = str(hash(json.dumps(run.workflow, sort_keys=True))) + + # Join image paths + image_paths = ";".join(run.generated_images) if run.generated_images else "" + + row = { + 'run_id': run.run_id, + 'timestamp': run.timestamp, + 'model': run.model, + 'vae': run.vae or "", + 'prompt': run.prompt, + 'negative_prompt': run.negative_prompt, + 'seed': run.seed, + 'sampler': run.sampler, + 'steps': run.steps, + 'cfg_scale': run.cfg_scale, + 'width': run.width, + 'height': run.height, + 'batch_size': run.batch_size, + 'batch_count': run.batch_count, + 'generation_type': run.generation_type, + 'image_paths': image_paths, + 'loras': loras_json, + 'controlnets': controlnets_json, + 'workflow_hash': workflow_hash + } + writer.writerow(row) + + return csv_path + + def validate_csv_schema(self, csv_path: str) -> bool: + """Validate that CSV has all required columns""" + try: + with open(csv_path, 'r', encoding='utf-8') as csvfile: + reader = csv.DictReader(csvfile) + fieldnames = reader.fieldnames + + required_columns = [ + 'run_id', 'timestamp', 'model', 'vae', 'prompt', + 'negative_prompt', 'seed', 'sampler', 'steps', 'cfg_scale', + 'width', 'height', 'batch_size', 'batch_count', + 'generation_type', 'image_paths', 'loras', 'controlnets', 'workflow_hash' + ] + + missing_columns = [col for col in required_columns if col not in fieldnames] + if missing_columns: + print(f"❌ Missing required columns: {missing_columns}") + return False + + print(f"✅ CSV schema validation passed") + return True + + except Exception as e: + print(f"❌ CSV schema validation failed: {e}") + return False + + def copy_images_to_bundle(self, runs: List[RunConfig], bundle_dir: str) -> List[str]: + """Copy selected grid images to bundle directory""" + copied_images = [] + + for run in runs: + for image_filename in run.generated_images: + if image_filename: + # Source path in output directory + src_path = os.path.join(self.output_dir, image_filename) + + 
if os.path.exists(src_path): + # Destination in bundle + dest_path = os.path.join(bundle_dir, "images", image_filename) + os.makedirs(os.path.dirname(dest_path), exist_ok=True) + + try: + shutil.copy2(src_path, dest_path) + copied_images.append(image_filename) + print(f"✅ Copied image: {image_filename}") + except Exception as e: + print(f"❌ Failed to copy {image_filename}: {e}") + else: + print(f"⚠️ Image not found: {src_path}") + + return copied_images + + def create_config_json(self, runs: List[RunConfig]) -> str: + """Create config.json with run configurations""" + config_data = { + "report_metadata": { + "generated_at": datetime.now().isoformat(), + "total_runs": len(runs), + "generation_types": list(set(run.generation_type for run in runs)), + "models_used": list(set(run.model for run in runs)) + }, + "runs": [asdict(run) for run in runs] + } + + config_path = "temp_config.json" + with open(config_path, 'w', encoding='utf-8') as f: + json.dump(config_data, f, indent=2, ensure_ascii=False) + + return config_path + + def create_readme(self, runs: List[RunConfig], copied_images: List[str]) -> str: + """Create README.md for the report bundle""" + readme_content = f"""# Dream Layer Report Bundle + +Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} + +## Overview +This report bundle contains {len(runs)} completed image generation runs with their configurations and results. 
+ +## Contents +- `results.csv` - Tabular data of all runs with metadata +- `config.json` - Detailed configuration data for each run +- `images/` - Generated images from all runs +- `README.md` - This file + +## Statistics +- Total runs: {len(runs)} +- Generation types: {', '.join(set(run.generation_type for run in runs))} +- Models used: {', '.join(set(run.model for run in runs))} +- Images included: {len(copied_images)} + +## CSV Schema +The results.csv file contains the following columns: +- run_id: Unique identifier for each run +- timestamp: When the run was executed +- model: Model used for generation +- vae: VAE model (if any) +- prompt: Positive prompt +- negative_prompt: Negative prompt +- seed: Random seed used +- sampler: Sampling method +- steps: Number of sampling steps +- cfg_scale: CFG scale value +- width/height: Image dimensions +- batch_size/batch_count: Batch settings +- generation_type: txt2img or img2img +- image_paths: Semicolon-separated list of generated image filenames +- loras: JSON array of LoRA configurations +- controlnets: JSON array of ControlNet configurations +- workflow_hash: Hash of the workflow configuration + +## File Paths +All image paths in the CSV resolve to files present in this zip bundle. 
+""" + + readme_path = "temp_README.md" + with open(readme_path, 'w', encoding='utf-8') as f: + f.write(readme_content) + + return readme_path + + def create_report_bundle(self, run_ids: Optional[List[str]] = None) -> str: + """Create a complete report bundle""" + + # Get runs to include + if run_ids: + runs = [self.registry.get_run(run_id) for run_id in run_ids if self.registry.get_run(run_id)] + else: + runs = self.registry.get_all_runs() + + if not runs: + raise ValueError("No runs found to include in report") + + print(f"📊 Creating report bundle with {len(runs)} runs") + + # Create temporary directory for bundle + bundle_dir = f"temp_report_bundle_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + os.makedirs(bundle_dir, exist_ok=True) + + try: + # Generate CSV + print("📝 Generating results.csv...") + csv_path = self.generate_csv(runs) + + # Validate CSV schema + if not self.validate_csv_schema(csv_path): + raise ValueError("CSV schema validation failed") + + # Copy CSV to bundle + shutil.copy2(csv_path, os.path.join(bundle_dir, "results.csv")) + + # Create config.json + print("⚙️ Creating config.json...") + config_path = self.create_config_json(runs) + shutil.copy2(config_path, os.path.join(bundle_dir, "config.json")) + + # Copy images + print("🖼️ Copying images...") + copied_images = self.copy_images_to_bundle(runs, bundle_dir) + + # Create README + print("📖 Creating README.md...") + readme_path = self.create_readme(runs, copied_images) + shutil.copy2(readme_path, os.path.join(bundle_dir, "README.md")) + + # Create zip file + print("📦 Creating report.zip...") + zip_path = "report.zip" + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + for root, dirs, files in os.walk(bundle_dir): + for file in files: + file_path = os.path.join(root, file) + arcname = os.path.relpath(file_path, bundle_dir) + zipf.write(file_path, arcname) + + # Cleanup temp files + for temp_file in [csv_path, config_path, readme_path]: + if os.path.exists(temp_file): + 
os.remove(temp_file) + + # Cleanup temp directory + shutil.rmtree(bundle_dir) + + print(f"✅ Report bundle created: {zip_path}") + return zip_path + + except Exception as e: + # Cleanup on error + if os.path.exists(bundle_dir): + shutil.rmtree(bundle_dir) + raise e + +# Flask app for report bundle API +app = Flask(__name__) +CORS(app, resources={ + r"/*": { + "origins": ["http://localhost:*", "http://127.0.0.1:*"], + "methods": ["GET", "POST", "OPTIONS"], + "allow_headers": ["Content-Type"] + } +}) + +generator = ReportBundleGenerator() + +@app.route('/api/report-bundle', methods=['POST']) +def create_report_bundle(): + """Create a report bundle with selected runs""" + try: + data = request.json or {} + run_ids = data.get('run_ids', []) # Empty list means all runs + + zip_path = generator.create_report_bundle(run_ids) + + return jsonify({ + "status": "success", + "message": "Report bundle created successfully", + "file_path": zip_path + }) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +@app.route('/api/report-bundle/download', methods=['GET']) +def download_report_bundle(): + """Download the generated report.zip file""" + try: + zip_path = "report.zip" + if not os.path.exists(zip_path): + return jsonify({ + "status": "error", + "message": "Report bundle not found. Please generate one first." 
+ }), 404 + + return send_file( + zip_path, + as_attachment=True, + download_name="report.zip", + mimetype="application/zip" + ) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +@app.route('/api/report-bundle/validate', methods=['POST']) +def validate_report_bundle(): + """Validate a report bundle schema""" + try: + data = request.json or {} + csv_content = data.get('csv_content', '') + + # Write CSV content to temp file for validation + temp_csv = "temp_validation.csv" + with open(temp_csv, 'w', encoding='utf-8') as f: + f.write(csv_content) + + is_valid = generator.validate_csv_schema(temp_csv) + + # Cleanup + if os.path.exists(temp_csv): + os.remove(temp_csv) + + return jsonify({ + "status": "success", + "valid": is_valid + }) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=5006, debug=True) \ No newline at end of file diff --git a/dream_layer_backend/run_registry.py b/dream_layer_backend/run_registry.py new file mode 100644 index 00000000..e8ce4f20 --- /dev/null +++ b/dream_layer_backend/run_registry.py @@ -0,0 +1,236 @@ +import json +import os +import time +import uuid +from datetime import datetime +from typing import Dict, List, Any, Optional +from dataclasses import dataclass, asdict +from flask import Flask, jsonify, request +from flask_cors import CORS + +@dataclass +class RunConfig: + """Represents a frozen configuration for a completed run""" + run_id: str + timestamp: str + model: str + vae: Optional[str] + loras: List[Dict[str, Any]] + controlnets: List[Dict[str, Any]] + prompt: str + negative_prompt: str + seed: int + sampler: str + steps: int + cfg_scale: float + width: int + height: int + batch_size: int + batch_count: int + workflow: Dict[str, Any] + version: str + generated_images: List[str] + generation_type: str # "txt2img" or "img2img" + +class RunRegistry: + """Manages completed runs 
and their configurations""" + + def __init__(self, storage_file: str = "run_registry.json"): + self.storage_file = storage_file + self.runs: Dict[str, RunConfig] = {} + self.load_runs() + + def load_runs(self): + """Load runs from storage file""" + try: + if os.path.exists(self.storage_file): + with open(self.storage_file, 'r', encoding='utf-8') as f: + data = json.load(f) + for run_id, run_data in data.items(): + self.runs[run_id] = RunConfig(**run_data) + except Exception as e: + print(f"Error loading run registry: {e}") + + def save_runs(self): + """Save runs to storage file""" + try: + data = {run_id: asdict(run_config) for run_id, run_config in self.runs.items()} + with open(self.storage_file, 'w', encoding='utf-8') as f: + json.dump(data, f, indent=2, ensure_ascii=False) + except Exception as e: + print(f"Error saving run registry: {e}") + + def add_run(self, config: RunConfig): + """Add a completed run to the registry""" + self.runs[config.run_id] = config + self.save_runs() + + def get_run(self, run_id: str) -> Optional[RunConfig]: + """Get a specific run by ID""" + return self.runs.get(run_id) + + def get_all_runs(self) -> List[RunConfig]: + """Get all runs sorted by timestamp (newest first)""" + return sorted(self.runs.values(), key=lambda x: x.timestamp, reverse=True) + + def delete_run(self, run_id: str) -> bool: + """Delete a run from the registry""" + if run_id in self.runs: + del self.runs[run_id] + self.save_runs() + return True + return False + +# Global registry instance +registry = RunRegistry() + +def create_run_config_from_generation_data( + generation_data: Dict[str, Any], + generated_images: List[str], + generation_type: str +) -> RunConfig: + """Create a RunConfig from generation data""" + + # Extract configuration from generation data + config = RunConfig( + run_id=str(uuid.uuid4()), + timestamp=datetime.now().isoformat(), + model=generation_data.get('model_name', 'unknown'), + vae=generation_data.get('vae_name'), + 
loras=generation_data.get('lora', []), + controlnets=generation_data.get('controlnet', {}).get('units', []), + prompt=generation_data.get('prompt', ''), + negative_prompt=generation_data.get('negative_prompt', ''), + seed=generation_data.get('seed', 0), + sampler=generation_data.get('sampler_name', 'euler'), + steps=generation_data.get('steps', 20), + cfg_scale=generation_data.get('cfg_scale', 7.0), + width=generation_data.get('width', 512), + height=generation_data.get('height', 512), + batch_size=generation_data.get('batch_size', 1), + batch_count=generation_data.get('batch_count', 1), + workflow=generation_data.get('workflow', {}), + version="1.0.0", # TODO: Get from app version + generated_images=generated_images, + generation_type=generation_type + ) + + return config + +# Flask app for run registry API +app = Flask(__name__) +CORS(app, resources={ + r"/*": { + "origins": ["http://localhost:*", "http://127.0.0.1:*"], + "methods": ["GET", "POST", "DELETE", "OPTIONS"], + "allow_headers": ["Content-Type"] + } +}) + +@app.route('/api/runs', methods=['GET']) +def get_runs(): + """Get all completed runs""" + try: + runs = registry.get_all_runs() + return jsonify({ + "status": "success", + "runs": [asdict(run) for run in runs] + }) + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +@app.route('/api/runs/<run_id>', methods=['GET']) +def get_run(run_id: str): + """Get a specific run by ID""" + try: + run = registry.get_run(run_id) + if run: + return jsonify({ + "status": "success", + "run": asdict(run) + }) + else: + return jsonify({ + "status": "error", + "message": "Run not found" + }), 404 + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +@app.route('/api/runs', methods=['POST']) +def add_run(): + """Add a new completed run""" + try: + data = request.json + if not data: + return jsonify({ + "status": "error", + "message": "No data provided" + }), 400 + + # Create run config from
the provided data + run_config = RunConfig( + run_id=data.get('run_id', str(uuid.uuid4())), + timestamp=data.get('timestamp', datetime.now().isoformat()), + model=data.get('model', 'unknown'), + vae=data.get('vae'), + loras=data.get('loras', []), + controlnets=data.get('controlnets', []), + prompt=data.get('prompt', ''), + negative_prompt=data.get('negative_prompt', ''), + seed=data.get('seed', 0), + sampler=data.get('sampler', 'euler'), + steps=data.get('steps', 20), + cfg_scale=data.get('cfg_scale', 7.0), + width=data.get('width', 512), + height=data.get('height', 512), + batch_size=data.get('batch_size', 1), + batch_count=data.get('batch_count', 1), + workflow=data.get('workflow', {}), + version=data.get('version', '1.0.0'), + generated_images=data.get('generated_images', []), + generation_type=data.get('generation_type', 'txt2img') + ) + + registry.add_run(run_config) + + return jsonify({ + "status": "success", + "run_id": run_config.run_id, + "message": "Run added successfully" + }) + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +@app.route('/api/runs/<run_id>', methods=['DELETE']) +def delete_run(run_id: str): + """Delete a run""" + try: + success = registry.delete_run(run_id) + if success: + return jsonify({ + "status": "success", + "message": "Run deleted successfully" + }) + else: + return jsonify({ + "status": "error", + "message": "Run not found" + }), 404 + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=5005, debug=True) \ No newline at end of file diff --git a/dream_layer_backend/shared_utils.py b/dream_layer_backend/shared_utils.py index bfe186d1..a5952bff 100644 --- a/dream_layer_backend/shared_utils.py +++ b/dream_layer_backend/shared_utils.py @@ -81,6 +81,7 @@ def wait_for_image(prompt_id: str, save_node_id: str = "9", max_wait_time: int = Wait for image generation to complete and return the generated
images This is a shared function used by both txt2img and img2img servers """ + print("wait_for_image") output_dir, _ = get_directories() start_time = time.time() diff --git a/dream_layer_backend/tests/test_report_bundle.py b/dream_layer_backend/tests/test_report_bundle.py new file mode 100644 index 00000000..52350888 --- /dev/null +++ b/dream_layer_backend/tests/test_report_bundle.py @@ -0,0 +1,400 @@ +import pytest +import json +import csv +import tempfile +import os +import zipfile +from datetime import datetime +from report_bundle import ReportBundleGenerator, RunConfig + +class TestReportBundle: + """Test cases for the Report Bundle functionality""" + + def setup_method(self): + """Set up test fixtures""" + # Create temporary directories for testing + self.temp_output_dir = tempfile.mkdtemp() + self.generator = ReportBundleGenerator(self.temp_output_dir) + + # Create test images + self.test_images = [] + for i in range(3): + image_path = os.path.join(self.temp_output_dir, f"test_image_{i}.png") + with open(image_path, 'w') as f: + f.write(f"fake image data {i}") + self.test_images.append(f"test_image_{i}.png") + + def teardown_method(self): + """Clean up test fixtures""" + import shutil + if os.path.exists(self.temp_output_dir): + shutil.rmtree(self.temp_output_dir) + + def test_required_csv_columns_exist(self): + """Test that CSV has all required columns""" + # Create test runs + test_runs = [ + RunConfig( + run_id="test-run-1", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt 1", + negative_prompt="test negative 1", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=self.test_images[:1], + generation_type="txt2img" + ), + RunConfig( + run_id="test-run-2", + timestamp=datetime.now().isoformat(), + model="test-model-2.safetensors", + 
vae="test-vae.safetensors", + loras=[{"name": "test-lora", "strength": 0.8}], + controlnets=[{"model": "test-controlnet", "strength": 1.0, "enabled": True}], + prompt="test prompt 2", + negative_prompt="test negative 2", + seed=67890, + sampler="dpm++", + steps=30, + cfg_scale=8.5, + width=768, + height=768, + batch_size=2, + batch_count=3, + workflow={"test": "workflow"}, + version="1.0.0", + generated_images=self.test_images[1:], + generation_type="img2img" + ) + ] + + # Generate CSV + csv_path = self.generator.generate_csv(test_runs) + + # Validate schema + assert self.generator.validate_csv_schema(csv_path) + + # Check that all required columns exist + with open(csv_path, 'r', encoding='utf-8') as f: + reader = csv.DictReader(f) + fieldnames = reader.fieldnames + + required_columns = [ + 'run_id', 'timestamp', 'model', 'vae', 'prompt', + 'negative_prompt', 'seed', 'sampler', 'steps', 'cfg_scale', + 'width', 'height', 'batch_size', 'batch_count', + 'generation_type', 'image_paths', 'loras', 'controlnets', 'workflow_hash' + ] + + for column in required_columns: + assert column in fieldnames, f"Missing required column: {column}" + + # Cleanup + if os.path.exists(csv_path): + os.remove(csv_path) + + def test_empty_values_handled_without_crashes(self): + """Test that empty values are handled without crashes""" + # Create test run with empty values + test_run = RunConfig( + run_id="", + timestamp="", + model="", + vae=None, + loras=[], + controlnets=[], + prompt="", + negative_prompt="", + seed=0, + sampler="", + steps=0, + cfg_scale=0.0, + width=0, + height=0, + batch_size=0, + batch_count=0, + workflow={}, + version="", + generated_images=[], + generation_type="" + ) + + # Should not crash when generating CSV + csv_path = self.generator.generate_csv([test_run]) + + # Should not crash when validating schema + assert self.generator.validate_csv_schema(csv_path) + + # Cleanup + if os.path.exists(csv_path): + os.remove(csv_path) + + def 
test_csv_schema_validation(self): + """Test CSV schema validation with valid and invalid schemas""" + # Test valid CSV + valid_csv_content = """run_id,timestamp,model,vae,prompt,negative_prompt,seed,sampler,steps,cfg_scale,width,height,batch_size,batch_count,generation_type,image_paths,loras,controlnets,workflow_hash +test-run,2023-01-01T00:00:00,model.safetensors,vae.safetensors,prompt,negative,123,euler,20,7.0,512,512,1,1,txt2img,image.png,[],[],hash123""" + + temp_csv = "temp_valid.csv" + with open(temp_csv, 'w', encoding='utf-8') as f: + f.write(valid_csv_content) + + assert self.generator.validate_csv_schema(temp_csv) + + # Test invalid CSV (missing columns) + invalid_csv_content = """run_id,timestamp,model,prompt,seed +test-run,2023-01-01T00:00:00,model.safetensors,prompt,123""" + + temp_csv_invalid = "temp_invalid.csv" + with open(temp_csv_invalid, 'w', encoding='utf-8') as f: + f.write(invalid_csv_content) + + assert not self.generator.validate_csv_schema(temp_csv_invalid) + + # Cleanup + for temp_file in [temp_csv, temp_csv_invalid]: + if os.path.exists(temp_file): + os.remove(temp_file) + + def test_image_paths_resolve_to_files(self): + """Test that all image paths in CSV resolve to files present in zip""" + # Create test runs with images + test_runs = [ + RunConfig( + run_id="test-run-1", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=self.test_images, + generation_type="txt2img" + ) + ] + + # Mock the registry to return our test runs + self.generator.registry.runs = {run.run_id: run for run in test_runs} + + # Create report bundle + zip_path = self.generator.create_report_bundle([test_run.run_id for test_run in test_runs]) + + # Verify zip file exists + assert 
os.path.exists(zip_path) + + # Extract and verify contents + with zipfile.ZipFile(zip_path, 'r') as zipf: + # Check that results.csv exists + csv_files = [f for f in zipf.namelist() if f.endswith('results.csv')] + assert len(csv_files) == 1 + + # Read CSV and verify image paths + with zipf.open(csv_files[0]) as csv_file: + reader = csv.DictReader(csv_file.read().decode('utf-8').splitlines()) + for row in reader: + image_paths = row['image_paths'].split(';') if row['image_paths'] else [] + for image_path in image_paths: + if image_path.strip(): + # Check that image exists in zip + image_in_zip = f"images/{image_path}" + assert image_in_zip in zipf.namelist(), f"Image {image_path} not found in zip" + + # Check that config.json exists + config_files = [f for f in zipf.namelist() if f.endswith('config.json')] + assert len(config_files) == 1 + + # Check that README.md exists + readme_files = [f for f in zipf.namelist() if f.endswith('README.md')] + assert len(readme_files) == 1 + + # Cleanup + if os.path.exists(zip_path): + os.remove(zip_path) + + def test_deterministic_file_names_and_paths(self): + """Test that file names and paths are deterministic""" + test_runs = [ + RunConfig( + run_id="test-run-1", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=self.test_images[:1], + generation_type="txt2img" + ) + ] + + # Mock the registry to return our test runs + self.generator.registry.runs = {run.run_id: run for run in test_runs} + + # Create two report bundles with same data + zip_path_1 = self.generator.create_report_bundle([test_runs[0].run_id]) + zip_path_2 = self.generator.create_report_bundle([test_runs[0].run_id]) + + # Verify both zip files exist + assert os.path.exists(zip_path_1) 
+ assert os.path.exists(zip_path_2) + + # Check that both zips have same structure + with zipfile.ZipFile(zip_path_1, 'r') as zip1, zipfile.ZipFile(zip_path_2, 'r') as zip2: + files_1 = sorted(zip1.namelist()) + files_2 = sorted(zip2.namelist()) + + # Should have same files + assert files_1 == files_2 + + # Should have expected file structure + expected_files = [ + 'results.csv', + 'config.json', + 'README.md', + 'images/test_image_0.png' + ] + + for expected_file in expected_files: + assert any(f.endswith(expected_file) for f in files_1), f"Missing {expected_file}" + + # Cleanup + for zip_path in [zip_path_1, zip_path_2]: + if os.path.exists(zip_path): + os.remove(zip_path) + + def test_config_json_structure(self): + """Test that config.json has correct structure""" + test_runs = [ + RunConfig( + run_id="test-run-1", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=self.test_images[:1], + generation_type="txt2img" + ) + ] + + # Create config.json + config_path = self.generator.create_config_json(test_runs) + + # Verify structure + with open(config_path, 'r', encoding='utf-8') as f: + config_data = json.load(f) + + # Check required top-level keys + assert 'report_metadata' in config_data + assert 'runs' in config_data + + # Check metadata structure + metadata = config_data['report_metadata'] + assert 'generated_at' in metadata + assert 'total_runs' in metadata + assert 'generation_types' in metadata + assert 'models_used' in metadata + + # Check runs structure + runs = config_data['runs'] + assert len(runs) == 1 + assert runs[0]['run_id'] == 'test-run-1' + + # Cleanup + if os.path.exists(config_path): + os.remove(config_path) + + def test_readme_content(self): + """Test that 
README.md has correct content""" + test_runs = [ + RunConfig( + run_id="test-run-1", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=self.test_images[:1], + generation_type="txt2img" + ) + ] + + # Create README + readme_path = self.generator.create_readme(test_runs, self.test_images[:1]) + + # Verify content + with open(readme_path, 'r', encoding='utf-8') as f: + content = f.read() + + # Check for required sections + assert '# Dream Layer Report Bundle' in content + assert '## Overview' in content + assert '## Contents' in content + assert '## Statistics' in content + assert '## CSV Schema' in content + assert 'results.csv' in content + assert 'config.json' in content + assert 'images/' in content + assert 'README.md' in content + + # Cleanup + if os.path.exists(readme_path): + os.remove(readme_path) \ No newline at end of file diff --git a/dream_layer_backend/tests/test_run_registry.py b/dream_layer_backend/tests/test_run_registry.py new file mode 100644 index 00000000..ad3895ef --- /dev/null +++ b/dream_layer_backend/tests/test_run_registry.py @@ -0,0 +1,325 @@ +import pytest +import json +import tempfile +import os +from datetime import datetime +from run_registry import RunConfig, RunRegistry, create_run_config_from_generation_data + +class TestRunRegistry: + """Test cases for the Run Registry functionality""" + + def setup_method(self): + """Set up test fixtures""" + # Create a temporary file for testing + self.temp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json') + self.temp_file.close() + self.registry = RunRegistry(self.temp_file.name) + + def teardown_method(self): + """Clean up test fixtures""" + if os.path.exists(self.temp_file.name): + 
os.unlink(self.temp_file.name) + + def test_required_keys_exist(self): + """Test that RunConfig has all required keys""" + # Create a minimal run config with all required fields + config = RunConfig( + run_id="test-run-123", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="test negative", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=[], + generation_type="txt2img" + ) + + # Assert all required keys exist + required_keys = [ + 'run_id', 'timestamp', 'model', 'vae', 'loras', 'controlnets', + 'prompt', 'negative_prompt', 'seed', 'sampler', 'steps', 'cfg_scale', + 'width', 'height', 'batch_size', 'batch_count', 'workflow', 'version', + 'generated_images', 'generation_type' + ] + + for key in required_keys: + assert hasattr(config, key), f"Missing required key: {key}" + + def test_empty_values_handled(self): + """Test that empty values are handled without crashes""" + # Test with empty strings and None values + config = RunConfig( + run_id="", + timestamp="", + model="", + vae=None, + loras=[], + controlnets=[], + prompt="", + negative_prompt="", + seed=0, + sampler="", + steps=0, + cfg_scale=0.0, + width=0, + height=0, + batch_size=0, + batch_count=0, + workflow={}, + version="", + generated_images=[], + generation_type="" + ) + + # Should not crash when accessing any field + assert config.run_id == "" + assert config.prompt == "" + assert config.negative_prompt == "" + assert config.model == "" + assert config.vae is None + assert config.loras == [] + assert config.controlnets == [] + assert config.seed == 0 + assert config.sampler == "" + assert config.steps == 0 + assert config.cfg_scale == 0.0 + assert config.width == 0 + assert config.height == 0 + assert config.batch_size == 0 + assert config.batch_count == 0 + assert 
config.workflow == {} + assert config.version == "" + assert config.generated_images == [] + assert config.generation_type == "" + + def test_create_run_config_from_generation_data(self): + """Test creating run config from generation data""" + # Test with minimal data + generation_data = { + 'prompt': 'test prompt', + 'negative_prompt': 'test negative', + 'model_name': 'test-model.safetensors', + 'seed': 12345, + 'sampler_name': 'euler', + 'steps': 20, + 'cfg_scale': 7.0, + 'width': 512, + 'height': 512, + 'batch_size': 1, + 'batch_count': 1 + } + + generated_images = ['test-image-1.png', 'test-image-2.png'] + + config = create_run_config_from_generation_data( + generation_data, generated_images, "txt2img" + ) + + # Assert required keys exist + assert hasattr(config, 'run_id') + assert hasattr(config, 'timestamp') + assert hasattr(config, 'model') + assert hasattr(config, 'prompt') + assert hasattr(config, 'negative_prompt') + assert hasattr(config, 'seed') + assert hasattr(config, 'sampler') + assert hasattr(config, 'steps') + assert hasattr(config, 'cfg_scale') + assert hasattr(config, 'width') + assert hasattr(config, 'height') + assert hasattr(config, 'batch_size') + assert hasattr(config, 'batch_count') + assert hasattr(config, 'workflow') + assert hasattr(config, 'version') + assert hasattr(config, 'generated_images') + assert hasattr(config, 'generation_type') + + # Assert values are set correctly + assert config.prompt == 'test prompt' + assert config.negative_prompt == 'test negative' + assert config.model == 'test-model.safetensors' + assert config.seed == 12345 + assert config.sampler == 'euler' + assert config.steps == 20 + assert config.cfg_scale == 7.0 + assert config.width == 512 + assert config.height == 512 + assert config.batch_size == 1 + assert config.batch_count == 1 + assert config.generated_images == generated_images + assert config.generation_type == 'txt2img' + + def test_create_run_config_with_empty_data(self): + """Test creating run 
config with empty/missing data""" + # Test with minimal/empty data + generation_data = {} + generated_images = [] + + config = create_run_config_from_generation_data( + generation_data, generated_images, "img2img" + ) + + # Should not crash and should have default values + assert config.prompt == '' + assert config.negative_prompt == '' + assert config.model == 'unknown' + assert config.seed == 0 + assert config.sampler == 'euler' + assert config.steps == 20 + assert config.cfg_scale == 7.0 + assert config.width == 512 + assert config.height == 512 + assert config.batch_size == 1 + assert config.batch_count == 1 + assert config.generated_images == [] + assert config.generation_type == 'img2img' + + def test_registry_save_and_load(self): + """Test that registry can save and load runs""" + # Create a test run + config = RunConfig( + run_id="test-run-123", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="test negative", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=[], + generation_type="txt2img" + ) + + # Add to registry + self.registry.add_run(config) + + # Create new registry instance to test loading + new_registry = RunRegistry(self.temp_file.name) + + # Should load the saved run + loaded_run = new_registry.get_run("test-run-123") + assert loaded_run is not None + assert loaded_run.run_id == "test-run-123" + assert loaded_run.prompt == "test prompt" + assert loaded_run.model == "test-model.safetensors" + + def test_registry_get_all_runs(self): + """Test getting all runs""" + # Add multiple runs + config1 = RunConfig( + run_id="test-run-1", + timestamp="2023-01-01T00:00:00", + model="model1.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="prompt 1", + negative_prompt="", + seed=1, + sampler="euler", + steps=20, + 
cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=[], + generation_type="txt2img" + ) + + config2 = RunConfig( + run_id="test-run-2", + timestamp="2023-01-02T00:00:00", + model="model2.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="prompt 2", + negative_prompt="", + seed=2, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=[], + generation_type="img2img" + ) + + self.registry.add_run(config1) + self.registry.add_run(config2) + + # Get all runs (should be sorted by timestamp, newest first) + all_runs = self.registry.get_all_runs() + assert len(all_runs) == 2 + assert all_runs[0].run_id == "test-run-2" # Newer timestamp + assert all_runs[1].run_id == "test-run-1" # Older timestamp + + def test_registry_delete_run(self): + """Test deleting a run""" + config = RunConfig( + run_id="test-run-to-delete", + timestamp=datetime.now().isoformat(), + model="test-model.safetensors", + vae=None, + loras=[], + controlnets=[], + prompt="test prompt", + negative_prompt="", + seed=12345, + sampler="euler", + steps=20, + cfg_scale=7.0, + width=512, + height=512, + batch_size=1, + batch_count=1, + workflow={}, + version="1.0.0", + generated_images=[], + generation_type="txt2img" + ) + + self.registry.add_run(config) + + # Verify run exists + assert self.registry.get_run("test-run-to-delete") is not None + + # Delete run + success = self.registry.delete_run("test-run-to-delete") + assert success is True + + # Verify run is deleted + assert self.registry.get_run("test-run-to-delete") is None + + # Try to delete non-existent run + success = self.registry.delete_run("non-existent-run") + assert success is False \ No newline at end of file diff --git a/dream_layer_backend/txt2img_server.py b/dream_layer_backend/txt2img_server.py index cc25eba2..b03617a4 100644 --- 
a/dream_layer_backend/txt2img_server.py +++ b/dream_layer_backend/txt2img_server.py @@ -2,12 +2,15 @@ from flask_cors import CORS import json import os +import requests from dream_layer import get_directories from dream_layer_backend_utils import interrupt_workflow from shared_utils import send_to_comfyui from dream_layer_backend_utils.fetch_advanced_models import get_controlnet_models from PIL import Image, ImageDraw from txt2img_workflow import transform_to_txt2img_workflow +from run_registry import create_run_config_from_generation_data +from dataclasses import asdict app = Flask(__name__) CORS(app, resources={ @@ -76,11 +79,41 @@ def handle_txt2img(): "message": comfy_response["error"] }), 500 + # Extract generated image filenames + generated_images = [] + if comfy_response.get("all_images"): + for img_data in comfy_response["all_images"]: + if isinstance(img_data, dict) and "filename" in img_data: + generated_images.append(img_data["filename"]) + + print("Start register process") + # Register the completed run + try: + run_config = create_run_config_from_generation_data( + data, generated_images, "txt2img" + ) + + # Send to run registry + registry_response = requests.post( + "http://localhost:5005/api/runs", + json=asdict(run_config), + timeout=5 + ) + + if registry_response.status_code == 200: + print(f"✅ Run registered successfully: {run_config.run_id}") + else: + print(f"⚠️ Failed to register run: {registry_response.text}") + + except Exception as e: + print(f"⚠️ Error registering run: {str(e)}") + response = jsonify({ "status": "success", "message": "Workflow sent to ComfyUI successfully", "comfy_response": comfy_response, - "generated_images": comfy_response.get("all_images", []) + "generated_images": comfy_response.get("all_images", []), + "run_id": run_config.run_id if 'run_config' in locals() else None }) return response diff --git a/dream_layer_frontend/src/components/Navigation/TabsNav.tsx b/dream_layer_frontend/src/components/Navigation/TabsNav.tsx 
index f0b8398f..041c887d 100644 --- a/dream_layer_frontend/src/components/Navigation/TabsNav.tsx +++ b/dream_layer_frontend/src/components/Navigation/TabsNav.tsx @@ -4,7 +4,9 @@ import { ImageIcon, Settings, GalleryHorizontal, - HardDrive + HardDrive, + History, + Download } from "lucide-react"; const tabs = [ @@ -13,7 +15,9 @@ const tabs = [ { id: "extras", label: "Extras", icon: GalleryHorizontal }, { id: "models", label: "Models", icon: HardDrive }, { id: "pnginfo", label: "PNG Info", icon: FileText }, - { id: "configurations", label: "Configurations", icon: Settings } + { id: "configurations", label: "Configurations", icon: Settings }, + { id: "runregistry", label: "Run Registry", icon: History }, + { id: "reportbundle", label: "Report Bundle", icon: Download } ]; interface TabsNavProps { diff --git a/dream_layer_frontend/src/features/ReportBundle/ReportBundlePage.tsx b/dream_layer_frontend/src/features/ReportBundle/ReportBundlePage.tsx new file mode 100644 index 00000000..28431afa --- /dev/null +++ b/dream_layer_frontend/src/features/ReportBundle/ReportBundlePage.tsx @@ -0,0 +1,256 @@ +import React, { useState } from 'react'; +import { useReportBundleStore } from '@/stores/useReportBundleStore'; +import { useRunRegistryStore } from '@/stores/useRunRegistryStore'; +import { Download, FileText, AlertCircle, CheckCircle, Loader2 } from 'lucide-react'; +import { Button } from '@/components/ui/button'; +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; +import { Alert, AlertDescription } from '@/components/ui/alert'; +import { Checkbox } from '@/components/ui/checkbox'; +import { Badge } from '@/components/ui/badge'; +import { Separator } from '@/components/ui/separator'; +import { format } from 'date-fns'; + +const ReportBundlePage: React.FC = () => { + const { + generating, + error, + downloadUrl, + generateReport, + downloadReport, + clearError + } = useReportBundleStore(); + + const { runs, fetchRuns } = useRunRegistryStore(); + 
const [selectedRuns, setSelectedRuns] = useState>(new Set()); + + React.useEffect(() => { + fetchRuns(); + }, [fetchRuns]); + + const handleGenerateReport = async () => { + const runIds = selectedRuns.size > 0 ? Array.from(selectedRuns) : undefined; + await generateReport(runIds); + }; + + const handleDownloadReport = async () => { + await downloadReport(); + }; + + const toggleRunSelection = (runId: string) => { + const newSelected = new Set(selectedRuns); + if (newSelected.has(runId)) { + newSelected.delete(runId); + } else { + newSelected.add(runId); + } + setSelectedRuns(newSelected); + }; + + const selectAllRuns = () => { + setSelectedRuns(new Set(runs.map(run => run.run_id))); + }; + + const clearSelection = () => { + setSelectedRuns(new Set()); + }; + + const formatTimestamp = (timestamp: string) => { + try { + return format(new Date(timestamp), 'MMM dd, yyyy HH:mm:ss'); + } catch { + return timestamp; + } + }; + + const getGenerationTypeColor = (type: string) => { + return type === 'txt2img' ? 'bg-blue-100 text-blue-800' : 'bg-green-100 text-green-800'; + }; + + return ( +
+
+

Report Bundle

+
+ + +
+
+ + {error && ( + + + + {error} + + + + )} + + {downloadUrl && ( + + + + Report bundle generated successfully! + + + + )} + + + + + + Generate Report Bundle + + + +
+
+

+ {selectedRuns.size > 0 + ? `Selected ${selectedRuns.size} runs for report bundle` + : 'No runs selected - will include all runs' + } +

+

+ The report bundle will contain results.csv, config.json, selected images, and README.md +

+
+ +
+ + + +
+

Select Runs to Include

+

+ Leave all unchecked to include all runs in the report bundle +

+ + {runs.length === 0 ? ( +
+ +

No runs available

+

Complete some image generations to see them here

+
+ ) : ( +
+ {runs.map((run) => ( +
+ toggleRunSelection(run.run_id)} + /> +
+
+ + {run.run_id.slice(0, 8)}... + + + {run.generation_type.toUpperCase()} + + + {formatTimestamp(run.timestamp)} + +
+

{run.prompt}

+
+ Model: {run.model} + Seed: {run.seed} + Steps: {run.steps} + CFG: {run.cfg_scale} + Images: {run.generated_images.length} +
+
+
+ ))} +
+ )} +
+
+
+ + + + Report Bundle Contents + + +
+
+ + results.csv + + - Tabular data with all run metadata and configurations + +
+
+ + config.json + + - Detailed configuration data for each run + +
+
+ + images/ + + - Generated images from all included runs + +
+
+ + README.md + + - Documentation and schema information + +
+
+ + + +
+

CSV Schema: run_id, timestamp, model, vae, prompt, negative_prompt, seed, sampler, steps, cfg_scale, width, height, batch_size, batch_count, generation_type, image_paths, loras, controlnets, workflow_hash

+

Validation: All required CSV columns are verified, and all image paths resolve to files present in the zip bundle.

+
+
+
+
+ ); +}; + +export default ReportBundlePage; \ No newline at end of file diff --git a/dream_layer_frontend/src/features/ReportBundle/index.ts b/dream_layer_frontend/src/features/ReportBundle/index.ts new file mode 100644 index 00000000..b548fd1f --- /dev/null +++ b/dream_layer_frontend/src/features/ReportBundle/index.ts @@ -0,0 +1 @@ +export { default as ReportBundlePage } from './ReportBundlePage'; \ No newline at end of file diff --git a/dream_layer_frontend/src/features/RunRegistry/RunRegistryPage.tsx b/dream_layer_frontend/src/features/RunRegistry/RunRegistryPage.tsx new file mode 100644 index 00000000..ce43e6d9 --- /dev/null +++ b/dream_layer_frontend/src/features/RunRegistry/RunRegistryPage.tsx @@ -0,0 +1,319 @@ +import React, { useEffect } from 'react'; +import { useRunRegistryStore } from '@/stores/useRunRegistryStore'; +import { RunConfig } from '@/types/runRegistry'; +import { format } from 'date-fns'; +import { Trash2, Eye, Clock, Image as ImageIcon } from 'lucide-react'; +import { Button } from '@/components/ui/button'; +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card'; +import { Badge } from '@/components/ui/badge'; +import { Skeleton } from '@/components/ui/skeleton'; +import { Alert, AlertDescription } from '@/components/ui/alert'; +import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'; +import { ScrollArea } from '@/components/ui/scroll-area'; +import { Separator } from '@/components/ui/separator'; + +const RunRegistryPage: React.FC = () => { + const { + runs, + loading, + error, + selectedRun, + fetchRuns, + deleteRun, + clearError, + setSelectedRun + } = useRunRegistryStore(); + + useEffect(() => { + fetchRuns(); + }, [fetchRuns]); + + const handleDeleteRun = async (runId: string) => { + if (confirm('Are you sure you want to delete this run?')) { + await deleteRun(runId); + } + }; + + const handleViewConfig = (run: RunConfig) => { + setSelectedRun(run); + }; + + const 
formatTimestamp = (timestamp: string) => { + try { + return format(new Date(timestamp), 'MMM dd, yyyy HH:mm:ss'); + } catch { + return timestamp; + } + }; + + const getGenerationTypeColor = (type: string) => { + return type === 'txt2img' ? 'bg-blue-100 text-blue-800' : 'bg-green-100 text-green-800'; + }; + + if (loading) { + return ( +
+

Run Registry

+
+ {[...Array(5)].map((_, i) => ( + + + + + + + + + + ))} +
+
+ ); + } + + if (error) { + return ( +
+

Run Registry

+ + + {error} + + + +
+ ); + } + + return ( +
+
+

Run Registry

+ +
+ + {runs.length === 0 ? ( + + + +

No runs yet

+

+ Completed generations will appear here with their frozen configurations. +

+
+
+ ) : ( +
+ {runs.map((run) => ( + + +
+
+ + {run.run_id.slice(0, 8)}... + + + {run.generation_type.toUpperCase()} + +
+
+ + +
+
+
+ + {formatTimestamp(run.timestamp)} +
+
+ +
+
+

{run.prompt}

+ {run.negative_prompt && ( +

+ Negative: {run.negative_prompt} +

+ )} +
+ +
+
+ Model: {run.model} + Sampler: {run.sampler} + Steps: {run.steps} + CFG: {run.cfg_scale} +
+
+ + {run.generated_images.length} image(s) +
+
+ + {(run.loras.length > 0 || run.controlnets.length > 0) && ( +
+ {run.loras.map((lora, index) => ( + + LoRA: {lora.name} + + ))} + {run.controlnets.map((controlnet, index) => ( + + ControlNet: {controlnet.model} + + ))} +
+ )} +
+
+
+ ))} +
+ )} + + {/* Frozen Config Modal */} + setSelectedRun(null)}> + + + Frozen Configuration + + {selectedRun && ( + +
+
+
+

Basic Info

+
+

Run ID: {selectedRun.run_id}

+

Timestamp: {formatTimestamp(selectedRun.timestamp)}

+

Type: {selectedRun.generation_type}

+

Version: {selectedRun.version}

+
+
+
+

Generation Settings

+
+

Model: {selectedRun.model}

+

VAE: {selectedRun.vae || 'Default'}

+

Sampler: {selectedRun.sampler}

+

Steps: {selectedRun.steps}

+

CFG Scale: {selectedRun.cfg_scale}

+

Seed: {selectedRun.seed}

+
+
+
+ + + +
+

Prompts

+
+
+

Positive Prompt:

+

{selectedRun.prompt}

+
+ {selectedRun.negative_prompt && ( +
+

Negative Prompt:

+

{selectedRun.negative_prompt}

+
+ )} +
+
+ + + +
+

Image Settings

+
+

Width: {selectedRun.width}

+

Height: {selectedRun.height}

+

Batch Size: {selectedRun.batch_size}

+

Batch Count: {selectedRun.batch_count}

+
+
+ + {selectedRun.loras.length > 0 && ( + <> + +
+

LoRAs

+
+ {selectedRun.loras.map((lora, index) => ( +
+

Name: {lora.name}

+

Strength: {lora.strength}

+
+ ))} +
+
+ + )} + + {selectedRun.controlnets.length > 0 && ( + <> + +
+

ControlNets

+
+ {selectedRun.controlnets.map((controlnet, index) => ( +
+

Model: {controlnet.model}

+

Strength: {controlnet.strength}

+

Enabled: {controlnet.enabled ? 'Yes' : 'No'}

+
+ ))} +
+
+ + )} + + + +
+

Generated Images

+
+ {selectedRun.generated_images.map((image, index) => ( +
+

{image}

+
+ ))} +
+
+ + + +
+

Workflow (Serialized)

+
+                    {JSON.stringify(selectedRun.workflow, null, 2)}
+                  
+
+
+
+ )} +
+
+
+ ); +}; + +export default RunRegistryPage; \ No newline at end of file diff --git a/dream_layer_frontend/src/features/RunRegistry/index.ts b/dream_layer_frontend/src/features/RunRegistry/index.ts new file mode 100644 index 00000000..7b905721 --- /dev/null +++ b/dream_layer_frontend/src/features/RunRegistry/index.ts @@ -0,0 +1 @@ +export { default as RunRegistryPage } from './RunRegistryPage'; \ No newline at end of file diff --git a/dream_layer_frontend/src/pages/Index.tsx b/dream_layer_frontend/src/pages/Index.tsx index 741c71be..3481cf51 100644 --- a/dream_layer_frontend/src/pages/Index.tsx +++ b/dream_layer_frontend/src/pages/Index.tsx @@ -8,6 +8,8 @@ import ExtrasPage from '@/features/Extras'; import { ModelManagerPage } from '@/features/ModelManager'; import { PNGInfoPage } from '@/features/PNGInfo'; import { ConfigurationsPage } from '@/features/Configurations'; +import { RunRegistryPage } from '@/features/RunRegistry'; +import { ReportBundlePage } from '@/features/ReportBundle'; import { useTxt2ImgGalleryStore } from '@/stores/useTxt2ImgGalleryStore'; import { useImg2ImgGalleryStore } from '@/stores/useImg2ImgGalleryStore'; @@ -42,6 +44,10 @@ const Index = () => { return ; case "configurations": return ; + case "runregistry": + return ; + case "reportbundle": + return ; default: return ; } diff --git a/dream_layer_frontend/src/stores/useReportBundleStore.ts b/dream_layer_frontend/src/stores/useReportBundleStore.ts new file mode 100644 index 00000000..c6a26d38 --- /dev/null +++ b/dream_layer_frontend/src/stores/useReportBundleStore.ts @@ -0,0 +1,101 @@ +import { create } from 'zustand'; +import { ReportBundleState, ReportBundleActions } from '@/types/reportBundle'; + +interface ReportBundleStore extends ReportBundleState, ReportBundleActions {} + +const API_BASE_URL = 'http://localhost:5006/api'; + +export const useReportBundleStore = create((set, get) => ({ + // Initial state + generating: false, + error: null, + downloadUrl: null, + + // Actions + 
generateReport: async (runIds?: string[]) => { + set({ generating: true, error: null }); + try { + const response = await fetch(`${API_BASE_URL}/report-bundle`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + run_ids: runIds || [] + }), + }); + + const data = await response.json(); + + if (data.status === 'success') { + set({ + generating: false, + downloadUrl: `${API_BASE_URL}/report-bundle/download` + }); + } else { + set({ + error: data.message || 'Failed to generate report bundle', + generating: false + }); + } + } catch (error) { + set({ + error: error instanceof Error ? error.message : 'Failed to generate report bundle', + generating: false + }); + } + }, + + downloadReport: async () => { + const { downloadUrl } = get(); + if (!downloadUrl) { + set({ error: 'No report bundle available for download' }); + return; + } + + try { + const response = await fetch(downloadUrl); + if (response.ok) { + const blob = await response.blob(); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = 'report.zip'; + document.body.appendChild(a); + a.click(); + window.URL.revokeObjectURL(url); + document.body.removeChild(a); + } else { + set({ error: 'Failed to download report bundle' }); + } + } catch (error) { + set({ + error: error instanceof Error ? 
error.message : 'Failed to download report bundle' + }); + } + }, + + validateSchema: async (csvContent: string) => { + try { + const response = await fetch(`${API_BASE_URL}/report-bundle/validate`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + csv_content: csvContent + }), + }); + + const data = await response.json(); + return data.status === 'success' && data.valid; + } catch (error) { + console.error('Schema validation failed:', error); + return false; + } + }, + + clearError: () => { + set({ error: null }); + }, +})); \ No newline at end of file diff --git a/dream_layer_frontend/src/stores/useRunRegistryStore.ts b/dream_layer_frontend/src/stores/useRunRegistryStore.ts new file mode 100644 index 00000000..9d69818e --- /dev/null +++ b/dream_layer_frontend/src/stores/useRunRegistryStore.ts @@ -0,0 +1,83 @@ +import { create } from 'zustand'; +import { RunConfig, RunRegistryState, RunRegistryActions } from '@/types/runRegistry'; + +interface RunRegistryStore extends RunRegistryState, RunRegistryActions {} + +const API_BASE_URL = 'http://localhost:5005/api'; + +export const useRunRegistryStore = create((set, get) => ({ + // Initial state + runs: [], + loading: false, + error: null, + selectedRun: null, + + // Actions + fetchRuns: async () => { + set({ loading: true, error: null }); + try { + const response = await fetch(`${API_BASE_URL}/runs`); + const data = await response.json(); + + if (data.status === 'success' && data.runs) { + set({ runs: data.runs, loading: false }); + } else { + set({ error: data.message || 'Failed to fetch runs', loading: false }); + } + } catch (error) { + set({ + error: error instanceof Error ? 
error.message : 'Failed to fetch runs', + loading: false + }); + } + }, + + fetchRun: async (runId: string) => { + set({ loading: true, error: null }); + try { + const response = await fetch(`${API_BASE_URL}/runs/${runId}`); + const data = await response.json(); + + if (data.status === 'success' && data.run) { + set({ selectedRun: data.run, loading: false }); + } else { + set({ error: data.message || 'Failed to fetch run', loading: false }); + } + } catch (error) { + set({ + error: error instanceof Error ? error.message : 'Failed to fetch run', + loading: false + }); + } + }, + + deleteRun: async (runId: string) => { + try { + const response = await fetch(`${API_BASE_URL}/runs/${runId}`, { + method: 'DELETE', + }); + const data = await response.json(); + + if (data.status === 'success') { + // Remove the run from the local state + const { runs } = get(); + const updatedRuns = runs.filter(run => run.run_id !== runId); + set({ runs: updatedRuns }); + } else { + set({ error: data.message || 'Failed to delete run' }); + } + } catch (error) { + set({ + error: error instanceof Error ? 
error.message : 'Failed to delete run' + }); + } + }, + + clearError: () => { + set({ error: null }); + }, + + setSelectedRun: (run: RunConfig | null) => { + set({ selectedRun: run }); + }, +})); \ No newline at end of file diff --git a/dream_layer_frontend/src/types/reportBundle.ts b/dream_layer_frontend/src/types/reportBundle.ts new file mode 100644 index 00000000..50abd9ff --- /dev/null +++ b/dream_layer_frontend/src/types/reportBundle.ts @@ -0,0 +1,32 @@ +export interface ReportBundleRequest { + run_ids?: string[]; // Empty array means all runs +} + +export interface ReportBundleResponse { + status: 'success' | 'error'; + message?: string; + file_path?: string; +} + +export interface ReportBundleValidationRequest { + csv_content: string; +} + +export interface ReportBundleValidationResponse { + status: 'success' | 'error'; + valid: boolean; + message?: string; +} + +export interface ReportBundleState { + generating: boolean; + error: string | null; + downloadUrl: string | null; +} + +export interface ReportBundleActions { + generateReport: (runIds?: string[]) => Promise; + downloadReport: () => Promise; + validateSchema: (csvContent: string) => Promise; + clearError: () => void; +} \ No newline at end of file diff --git a/dream_layer_frontend/src/types/runRegistry.ts b/dream_layer_frontend/src/types/runRegistry.ts new file mode 100644 index 00000000..a4c5212c --- /dev/null +++ b/dream_layer_frontend/src/types/runRegistry.ts @@ -0,0 +1,53 @@ +export interface RunConfig { + run_id: string; + timestamp: string; + model: string; + vae?: string; + loras: Array<{ + name: string; + strength: number; + [key: string]: any; + }>; + controlnets: Array<{ + model: string; + strength: number; + enabled: boolean; + [key: string]: any; + }>; + prompt: string; + negative_prompt: string; + seed: number; + sampler: string; + steps: number; + cfg_scale: number; + width: number; + height: number; + batch_size: number; + batch_count: number; + workflow: Record; + version: string; + 
generated_images: string[]; + generation_type: 'txt2img' | 'img2img'; +} + +export interface RunRegistryResponse { + status: 'success' | 'error'; + runs?: RunConfig[]; + run?: RunConfig; + message?: string; +} + +export interface RunRegistryState { + runs: RunConfig[]; + loading: boolean; + error: string | null; + selectedRun: RunConfig | null; +} + +export interface RunRegistryActions { + fetchRuns: () => Promise; + fetchRun: (runId: string) => Promise; + deleteRun: (runId: string) => Promise; + clearError: () => void; + setSelectedRun: (run: RunConfig | null) => void; +} \ No newline at end of file diff --git a/start_dream_layer.bat b/start_dream_layer.bat index 08197b7b..169e1b46 100644 --- a/start_dream_layer.bat +++ b/start_dream_layer.bat @@ -123,7 +123,7 @@ echo %CYAN%================================================%NC% echo. :: Start Dream Layer backend (which also starts ComfyUI internally) -echo %BLUE%[STEP 1/4]%NC% Starting Dream Layer backend... +echo %BLUE%[STEP 1/6]%NC% Starting Dream Layer backend... start "Dream Layer Backend" /D "%CD%" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python dream_layer_backend\dream_layer.py > logs\dream_layer.log 2>&1" :: Wait for backend to start @@ -131,23 +131,31 @@ echo %YELLOW%[INFO]%NC% Waiting for backend to initialize... timeout /t 5 /nobreak >nul :: Start txt2img_server.py -echo %BLUE%[STEP 2/4]%NC% Starting txt2img_server.py... +echo %BLUE%[STEP 2/6]%NC% Starting txt2img_server.py... start "Txt2Img Server" /D "%CD%\dream_layer_backend" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python txt2img_server.py > ..\logs\txt2img_server.log 2>&1" :: Start img2img_server.py -echo %BLUE%[STEP 3/4]%NC% Starting img2img_server.py... +echo %BLUE%[STEP 3/6]%NC% Starting img2img_server.py... 
start "Img2Img Server" /D "%CD%\dream_layer_backend" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python img2img_server.py > ..\logs\img2img_server.log 2>&1" :: Start extras.py -echo %BLUE%[STEP 4/4]%NC% Starting extras.py... +echo %BLUE%[STEP 4/6]%NC% Starting extras.py... start "Extras Server" /D "%CD%\dream_layer_backend" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python extras.py > ..\logs\extras.log 2>&1" +:: Start run registry server +echo %BLUE%[STEP 5/6]%NC% Starting run registry server... +start "Run Registry Server" /D "%CD%\dream_layer_backend" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python run_registry.py > ..\logs\run_registry.log 2>&1" + +:: Start report bundle server +echo %BLUE%[STEP 6/6]%NC% Starting report bundle server... +start "Report Bundle Server" /D "%CD%\dream_layer_backend" cmd /c "chcp 65001 >nul && set PYTHONIOENCODING=utf-8 && python report_bundle.py > ..\logs\report_bundle.log 2>&1" + :: Wait for all backend services to start echo %YELLOW%[INFO]%NC% Waiting for all backend services to initialize... timeout /t 10 /nobreak >nul :: Start frontend development server -echo %BLUE%[STEP 5/5]%NC% Starting frontend development server... +echo %BLUE%[STEP 7/7]%NC% Starting frontend development server... start "Dream Layer Frontend" /D "%CD%\dream_layer_frontend" cmd /c "npm run dev > ..\logs\frontend.log 2>&1" :: Wait for frontend to start @@ -164,6 +172,8 @@ echo. echo %GREEN%Frontend (Main UI):%NC% http://localhost:8080 echo %GREEN%ComfyUI Interface:%NC% http://localhost:8188 echo %GREEN%Backend API:%NC% http://localhost:5002 +echo %GREEN%Run Registry API:%NC% http://localhost:5005 +echo %GREEN%Report Bundle API:%NC% http://localhost:5006 echo. 
echo %BLUE%[INFO]%NC% Device Mode: %DEVICE_MODE% echo %BLUE%[INFO]%NC% Check logs in the 'logs' directory if you encounter issues @@ -186,6 +196,18 @@ if %errorlevel% neq 0 ( echo %YELLOW%[WARNING]%NC% Backend may still be starting up - please wait a moment ) +:: Test Run Registry +python -c "import requests; print('Run Registry Status:', 'OK' if requests.get('http://localhost:5005/api/runs', timeout=10).status_code == 200 else 'ERROR')" 2>nul +if %errorlevel% neq 0 ( + echo %YELLOW%[WARNING]%NC% Run Registry may still be starting up - please wait a moment +) + +:: Test Report Bundle +python -c "import requests; print('Report Bundle Status:', 'OK' if requests.get('http://localhost:5006/api/report-bundle/download', timeout=10).status_code == 200 else 'ERROR')" 2>nul +if %errorlevel% neq 0 ( + echo %YELLOW%[WARNING]%NC% Report Bundle may still be starting up - please wait a moment +) + echo. echo %GREEN%Dream Layer is now running! %NC% echo %CYAN%Open your browser and navigate to: http://localhost:8080%NC%