-
Notifications
You must be signed in to change notification settings - Fork 210
PR #2 - Task 4. Compute Traces - Joe Black #98
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
4d841c0
47524bb
60a9ecd
8255ab2
cf5b79e
85222a9
b1e5927
d84d791
04520bf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| 3.11.9 |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,4 @@ | ||
| timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name | ||
| 1754671400.091321,0.0126,0,,CPU,N/A, | ||
| 1754683081.648606,301.4291,0,,CPU,N/A, | ||
| 1754683236.368768,301.5447,0,,CPU,N/A, |
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -18,18 +18,47 @@ | |||||
| import logging | ||||||
| from dream_layer import get_directories | ||||||
| from extras import COMFY_INPUT_DIR | ||||||
| from pathlib import Path | ||||||
|
|
||||||
|
|
||||||
| # Initialize logger | ||||||
| logger = logging.getLogger(__name__) | ||||||
|
|
||||||
| def get_available_checkpoints(): | ||||||
| logger.info(f"Current __file__ path: {__file__}") | ||||||
| root_dir = Path(__file__).resolve().parent.parent | ||||||
| logger.info(f"Resolved root_dir: {root_dir}") | ||||||
|
|
||||||
| checkpoints_dir = root_dir / "ComfyUI" / "models" / "checkpoints" | ||||||
| logger.info(f"Looking for checkpoints in: {checkpoints_dir}") | ||||||
|
|
||||||
| if not checkpoints_dir.exists(): | ||||||
| logger.error(f"Checkpoints directory does not exist: {checkpoints_dir}") | ||||||
| return [] | ||||||
|
|
||||||
| models = [f.name for f in checkpoints_dir.glob("*") if f.suffix in ['.safetensors', '.ckpt']] | ||||||
| logger.info(f"Found checkpoint files: {models}") | ||||||
| return models | ||||||
|
Comment on lines
+23
to
+39
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🛠️ Refactor suggestion Consider extracting the shared checkpoint discovery logic. Both Consider moving this function to a shared module: # In a new file: dream_layer_backend/shared_checkpoint_utils.py
from pathlib import Path
import logging

logger = logging.getLogger(__name__)

def get_available_checkpoints():
    """Get list of available checkpoint files from the models directory."""
    root_dir = Path(__file__).resolve().parent.parent
    checkpoints_dir = root_dir / "ComfyUI" / "models" / "checkpoints"
    logger.info(f"Looking for checkpoints in: {checkpoints_dir}")
    if not checkpoints_dir.exists():
        logger.error(f"Checkpoints directory does not exist: {checkpoints_dir}")
        return []
    try:
        models = [f.name for f in checkpoints_dir.glob("*") if f.suffix in ['.safetensors', '.ckpt']]
        logger.info(f"Found checkpoint files: {models}")
        return models
    except Exception as e:
        logger.error(f"Failed to list checkpoints: {e}")
        return []

Then import and use it in both workflow files. 🤖 Prompt for AI Agents |
||||||
|
|
||||||
| def transform_to_img2img_workflow(data): | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. issue (code-quality): We've found these issues:
Explanation: The quality score for this function is below the quality threshold of 25%. This score is a combination of the method length, cognitive complexity and working memory. How can you solve this? It might be worth refactoring this function to make it shorter and more readable.
|
||||||
| """ | ||||||
| Transform frontend request data into ComfyUI workflow format for img2img | ||||||
| """ | ||||||
|
|
||||||
| # Determine model type and features | ||||||
| model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors') | ||||||
| # Dynamically determine the model name that's being used and validate | ||||||
| requested_model = data.get("model_name") | ||||||
| available_models = get_available_checkpoints() | ||||||
| if not available_models: | ||||||
| raise FileNotFoundError("No checkpoint models found in ComfyUI models/checkpoints directory") | ||||||
|
|
||||||
| # Use requested model if valid, else fallback to detected | ||||||
| if requested_model and requested_model in available_models: | ||||||
| model_name = requested_model | ||||||
| else: | ||||||
| # fallback to first available checkpoint and log the fallback | ||||||
| model_name = available_models[0] | ||||||
| logger.warning(f"Requested model '{requested_model}' not found. Falling back to '{model_name}'.") | ||||||
|
|
||||||
| #model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors') # was hardcoded | ||||||
|
|
||||||
| use_controlnet = bool(data.get('controlnet')) | ||||||
| use_lora = bool(data.get('lora')) | ||||||
|
|
||||||
|
|
@@ -80,7 +109,7 @@ def transform_to_img2img_workflow(data): | |||||
| denoising_strength = max( | ||||||
| 0.0, min(1.0, float(data.get('denoising_strength', 0.75)))) | ||||||
| input_image = data.get('input_image', '') | ||||||
| model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors') | ||||||
| #model_name = data.get('model_name', 'v1-6-pruned-emaonly-fp16.safetensors') | ||||||
| sampler_name = data.get('sampler_name', 'euler') | ||||||
| scheduler = data.get('scheduler', 'normal') | ||||||
|
|
||||||
|
|
@@ -226,7 +255,19 @@ def transform_to_img2img_workflow(data): | |||||
| if refiner_data['refiner_enabled']: | ||||||
| logger.info("Injecting Refiner parameters...") | ||||||
| workflow = inject_refiner_parameters(workflow, refiner_data) | ||||||
|
|
||||||
|
|
||||||
| print(f"✅ Workflow transformation complete") | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Remove extraneous f-string prefix. The f-string on line 259 doesn't contain any placeholders, making the - print(f"✅ Workflow transformation complete")
+ print("✅ Workflow transformation complete")📝 Committable suggestion
Suggested change
🧰 Tools🪛 Ruff (0.12.2)259-259: f-string without any placeholders Remove extraneous (F541) 🤖 Prompt for AI Agents |
||||||
| # Ensure dump directory exists | ||||||
| dump_dir = os.path.join(os.path.dirname(__file__), "workflow_dumps") | ||||||
| os.makedirs(dump_dir, exist_ok=True) | ||||||
|
|
||||||
| # Save the workflow JSON | ||||||
| output_path = os.path.join(dump_dir, "last_workflow.json") | ||||||
| with open(output_path, "w") as f: | ||||||
| json.dump(workflow, f, indent=2) | ||||||
|
|
||||||
| print(f"📋 Generated workflow JSON: {json.dumps(workflow, indent=2)}") | ||||||
| print(f"🚀 Workflow JSON saved to {output_path}") | ||||||
| return workflow | ||||||
|
|
||||||
|
|
||||||
|
|
||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,11 @@ | ||
| timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version | ||
| 1754796517.274257,0.1072,0,,CPU,N/A | ||
| 1754799939.5604439,0.0121,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754800554.222678,0.0363,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754803186.83248,0.0754,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754833176.129069,0.0829,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754841283.544258,0.0107,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846601.354629,0.0118,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846604.915974,0.0045,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754847081.994686,301.2454,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754924526.325828,301.6547,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,2 @@ | ||
| timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version | ||
| 1754796516.524883,301.3008,0,,CPU,N/A |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,36 @@ | ||
| timestamp,total_time_s,images_generated,time_per_image_s,gpu_name,driver_version,ckpt_name | ||
| 1754803186.3610072,301.3461,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754833175.356676,301.1881,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754841283.3543549,0.0102,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754844427.844616,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754844771.521109,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754844897.207525,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754844928.5717719,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754845078.4543412,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754845256.875231,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754845318.64412,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754845646.243946,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754845676.958377,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846119.498097,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846290.427422,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846316.227009,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846425.6457322,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846462.971039,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846601.165691,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846604.743495,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754846780.546242,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754924224.424215,0.0,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754926591.025748,301.3167,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1754948761.507086,301.2014,0,,CPU,N/A,unknown | ||
| 1754953737.084278,301.574,0,,CPU,N/A,unknown | ||
| 1755031482.6354072,301.4959,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755031704.523446,80.554,1,80.554,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755032728.004299,0.4533,1,0.4533,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755033409.806452,301.5118,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755033868.0322208,301.699,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755039709.030485,301.1955,0,,CPU,N/A,juggernautXL_v8Rundiffusion.safetensors | ||
| 1755041998.9101171,300.3344,0,,CPU,N/A,unknown | ||
| 1755042012.7522612,301.2818,0,,CPU,N/A,unknown | ||
| 1755072753.412419,300.5095,0,,CPU,N/A,unknown | ||
| 1755100109.236161,301.1258,0,,CPU,N/A,unknown | ||
| 1755275022.702453,301.1851,0,,CPU,N/A,unknown |
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,42 @@ | ||
| import base64 | ||
| from pathlib import Path | ||
|
|
||
| # Folder containing images | ||
| folder = Path(__file__).parent # assuming this script is inside test_image folder | ||
|
|
||
| # Supported image extensions | ||
| img_extensions = {".png", ".jpg", ".jpeg", ".bmp", ".gif"} | ||
|
|
||
| # Find first image file in folder | ||
| image_files = [f for f in folder.iterdir() if f.suffix.lower() in img_extensions] | ||
|
|
||
| if not image_files: | ||
| raise FileNotFoundError(f"No image files found in {folder}") | ||
|
|
||
| input_img_path = image_files[0] | ||
| base64_txt_path = folder / "base64_txt_test_image.txt" | ||
| output_img_path = folder / "test_image.png" | ||
|
|
||
| print(f"Using input image: {input_img_path.name}") | ||
|
|
||
| # Read image bytes | ||
| with open(input_img_path, "rb") as f: | ||
| img_bytes = f.read() | ||
|
|
||
| # Encode to base64 string | ||
| base64_img = base64.b64encode(img_bytes).decode("utf-8") | ||
|
|
||
| # Save base64 string to 4.txt | ||
| with open(base64_txt_path, "w") as f: | ||
| f.write(base64_img) | ||
|
|
||
| print(f"Saved base64 string to {base64_txt_path}") | ||
|
|
||
| # Decode base64 string back to bytes | ||
| decoded_bytes = base64.b64decode(base64_img) | ||
|
|
||
| # Save decoded bytes as new PNG file | ||
| with open(output_img_path, "wb") as f: | ||
| f.write(decoded_bytes) | ||
|
|
||
| print(f"Saved decoded image as {output_img_path}") |
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
suggestion: Suffix filtering may miss files with double extensions.
Using 'f.suffix' will not detect files with compound extensions like '.ckpt.safetensors'. 'str.endswith' is recommended for accurate filtering.