diff --git a/report_bundler/README.md b/report_bundler/README.md
new file mode 100644
index 00000000..cfc59d9d
--- /dev/null
+++ b/report_bundler/README.md
@@ -0,0 +1,43 @@
+# Report Bundler
+
+Hi! I'm Rachana.
+
+This is my submission for DreamLayer's Open Source Challenge. (Task #5 - Report Bundle)
+
+Why this Option?
+As someone who’s worked on intelligent data pipelines and NLP automation tools, this challenge was a fun way to apply my real-world experience into a compact, useful OSS tool.
+
+This task generates a reproducible `report.zip` containing:
+- Metadata (`results.csv`)
+- Generation config (`config.json`)
+- Final grid images
+- Schema validation + README
+
+---
+
+## CSV Columns
+
+| Column | Description |
+|---------------|-----------------------------------------------------------|
+| image_path | Relative path to the grid image |
+| sampler | Sampling algorithm used |
+| steps | Number of inference steps |
+| cfg | Classifier-Free Guidance scale |
+| preset | Style or visual preset used |
+| seed | Random seed for deterministic generation |
+| width | Grid width in pixels (added for visual clarity) |
+| height | Grid height in pixels (added for visual clarity) |
+| grid_label | Custom label for the image (used in overlay or UX tags) |
+| notes | Any human-readable notes about generation intent |
+
+
+
+The output is deterministic, simple to trace, and easy to integrate into DreamLayer workflows.
+
+
+## How to Run
+
+```bash
+cd report_bundler
+python bundler.py
+```
diff --git a/report_bundler/bundler.py b/report_bundler/bundler.py
new file mode 100644
index 00000000..c51ff3df
--- /dev/null
+++ b/report_bundler/bundler.py
@@ -0,0 +1,87 @@
+import csv
+import json
+import zipfile
+from pathlib import Path
+
+# These are the columns we expect in the results.csv
+REQUIRED_COLUMNS = {"image_path", "sampler", "steps", "cfg", "preset", "seed"}
+
+def validate_csv_schema(csv_path):
+    """
+    Opens the CSV file and checks if it has all required columns.
+    Handles missing headers and throws readable errors.
+    """
+    with open(csv_path, newline='') as f:
+        reader = csv.DictReader(f)
+
+        if reader.fieldnames is None:
+            raise ValueError("CSV file is empty or missing a header row.")
+
+        header_fields = set(reader.fieldnames)
+        if not REQUIRED_COLUMNS.issubset(header_fields):
+            missing = REQUIRED_COLUMNS - header_fields
+            raise ValueError(f"Missing required columns: {missing}")
+
+        return list(reader)
+
+def collect_files(csv_rows):
+    """
+    From the rows in the CSV, grab all valid image paths.
+    Filters out rows with missing or empty 'image_path' fields.
+    """
+    files = set()
+    for idx, row in enumerate(csv_rows):
+        if "image_path" not in row:
+            raise ValueError(f"Row {idx} missing 'image_path' key: {row}")
+        image_path = row["image_path"]
+        if image_path and image_path.strip():
+            files.add(image_path)
+        else:
+            print(f"Skipping row {idx} due to empty image_path.")
+    return files
+
+def create_report_zip(output_path="report.zip"):
+    """
+    This function:
+    - Validates the results.csv file
+    - Ensures all images listed exist and are safe
+    - Packages everything into report.zip
+    """
+    base_dir = Path(__file__).parent
+    csv_path = base_dir / "results.csv"
+    config_path = base_dir / "config.json"
+    readme_path = base_dir / "README.md"
+    zip_path = base_dir / output_path
+
+    # Step 1: Validate CSV
+    csv_rows = validate_csv_schema(csv_path)
+
+    # Step 2: Collect all valid image paths
+    image_paths = collect_files(csv_rows)
+
+    # Step 3: Zip all files
+    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+        zipf.write(csv_path, arcname="results.csv")
+        zipf.write(config_path, arcname="config.json")
+        zipf.write(readme_path, arcname="README.md")
+
+        # Sort for a deterministic archive entry order (a set iterates in arbitrary order).
+        base_resolved = base_dir.resolve()
+        for path in sorted(image_paths):
+            # Resolve relative to the project directory, not the current working directory.
+            norm_path = (base_dir / path).resolve()
+            # Prevent path traversal by ensuring image is inside project directory
+            try:
+                norm_path.relative_to(base_resolved)
+            except ValueError:
+                raise ValueError(f"🚨 Invalid image path: {path}") from None
+
+            if not norm_path.exists():
+                raise FileNotFoundError(f"Image not found: {norm_path}")
+
+            zipf.write(norm_path, arcname=path)
+
+    print(f"Done! Report created at: {zip_path}")
+
+if __name__ == "__main__":
+    create_report_zip()
diff --git a/report_bundler/config.json b/report_bundler/config.json
new file mode 100644
index 00000000..a764908c
--- /dev/null
+++ b/report_bundler/config.json
@@ -0,0 +1,14 @@
+{
+  "model": "SDXL",
+  "vae": "AutoencoderKL",
+  "loras": ["AnimeStyleV2"],
+  "controlnets": ["CannyEdgeDetector"],
+  "prompt": "a serene futuristic cityscape at sunset, ultra realistic, 8k",
+  "negative_prompt": "blurry, low quality, distorted",
+  "seed": 123456,
+  "sampler": "Euler",
+  "steps": 25,
+  "cfg": 7.5,
+  "workflow": "txt2img",
+  "version": "1.0.0"
+}
diff --git a/report_bundler/grids/image1.png b/report_bundler/grids/image1.png
new file mode 100644
index 00000000..75229e86
Binary files /dev/null and b/report_bundler/grids/image1.png differ
diff --git a/report_bundler/grids/image2.png b/report_bundler/grids/image2.png
new file mode 100644
index 00000000..1be4bcb0
Binary files /dev/null and b/report_bundler/grids/image2.png differ
diff --git a/report_bundler/report.zip b/report_bundler/report.zip
new file mode 100644
index 00000000..4a3c9b91
Binary files /dev/null and b/report_bundler/report.zip differ
diff --git a/report_bundler/results.csv b/report_bundler/results.csv
new file mode 100644
index 00000000..082e0869
--- /dev/null
+++ b/report_bundler/results.csv
@@ -0,0 +1,3 @@
+image_path,sampler,steps,cfg,preset,seed,width,height,grid_label,notes
+grids/image1.png,Euler,30,8.5,cyber-neural,108234,1024,1024,digital-consciousness,"Futuristic brain core visualization in a neon tech chamber"
+grids/image2.png,DPM++ 2M Karras,28,7.8,ui-dreamscape,786512,1024,768,holographic-devices,"Concept art of interconnected digital devices in a glowy metaverse style"
diff --git a/report_bundler/test_schema.py b/report_bundler/test_schema.py
new file mode 100644
index 00000000..b26ded6b
--- /dev/null
+++ b/report_bundler/test_schema.py
@@ -0,0 +1,18 @@
+import csv
+from pathlib import Path
+
+def test_csv_schema():
+    """
+    Make sure results.csv contains all required metadata columns.
+    You can add more optional fields in the future if needed.
+    """
+    required = {"image_path", "sampler", "steps", "cfg", "preset", "seed"}
+
+    # Resolve relative to this file so the test passes from any working directory.
+    csv_path = Path(__file__).parent / "results.csv"
+    with open(csv_path, newline='') as f:
+        reader = csv.DictReader(f)
+        assert reader.fieldnames is not None, "Missing header row in CSV"
+        header = set(reader.fieldnames)
+
+    assert required.issubset(header), f"Missing columns: {required - header}"