Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions .github/workflows/lint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# CI workflow: run the repository lint script on every push and pull request.
name: Lint

on: [push, pull_request]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      # checkout@v4 / setup-python@v5: the v3/v4 majors run on Node 16,
      # which GitHub has deprecated on hosted runners.
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Track the latest 3.12.x patch release instead of pinning 3.12.2,
          # so security/bugfix patches are picked up automatically.
          python-version: '3.12'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e .[dev]
      - name: Run linting
        run: ./lint.sh
19 changes: 19 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# CI workflow: run the pytest suite on every push and pull request.
name: Test

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      # checkout@v4 / setup-python@v5: the v3/v4 majors run on Node 16,
      # which GitHub has deprecated on hosted runners.
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Track the latest 3.12.x patch release instead of pinning 3.12.2,
          # so security/bugfix patches are picked up automatically.
          python-version: '3.12'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e .[dev]
      - name: Run tests
        run: pytest
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# NeuSim: An Open-source Simulator Framework for NPUs

[![CI Lint Status](https://github.com/platformxlab/NeuSim/actions/workflows/lint.yml/badge.svg?branch=main)](https://github.com/platformxlab/NeuSim/actions/workflows/lint.yml) [![CI Test Status](https://github.com/platformxlab/NeuSim/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/platformxlab/NeuSim/actions/workflows/test.yml)

NeuSim is a simulator framework for modeling the performance and power behaviors of neural processing units (NPUs) when running machine learning workloads.

### 📌 Neural Processing Unit 101
Expand Down
39 changes: 39 additions & 0 deletions lint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
#!/bin/bash
# lint.sh -- run the project's linters.
#
# Critical ruff rules gate the exit status; all other checks (style/complexity
# warnings, formatting, mypy) are reported but non-blocking.
# Note: set -e is not used globally so the non-blocking checks still run even
# after a blocking check fails.

FAILURE=0

echo "Running ruff check (Critical Errors)..."
# Critical rules verify correctness:
#   E9   : syntax errors
#   F821 : undefined name
#   F822 : undefined export in __all__
#   F823 : local variable referenced before assignment
# Test the command directly instead of `$?` (ShellCheck SC2181): checking `$?`
# silently breaks if any statement is ever inserted between command and test.
if ! ruff check --select E9,F821,F822,F823 .; then
    echo "Critical linting errors found!"
    FAILURE=1
fi

echo "Running ruff check (Warnings - Style/Complexity)..."
# Warning rule families (non-blocking, --exit-zero):
#   E  : pycodestyle errors
#   I  : isort imports
#   B  : flake8-bugbear
#   UP : pyupgrade
#   F  : all Pyflakes (incl. unused imports/variables) not in the critical set
ruff check --select E,I,B,UP,F --exit-zero .

echo "Running ruff format check (Non-blocking)..."
ruff format --check . || echo "Ruff format check failed (warning only)"

echo "Running mypy (Non-blocking)..."
mypy . || echo "Mypy type checking failed (warning only)"

# Quote the expansion so the test is well-formed even if FAILURE were unset.
if [ "$FAILURE" -ne 0 ]; then
    echo "Linting failed due to critical errors."
    exit 1
fi

echo "Linting passed."
exit 0
2 changes: 1 addition & 1 deletion neusim/configs/models/LLMConfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def num_experts_per_token(self) -> int:
# NOTE(review): this span is a rendered diff hunk from the PR page -- the first
# kwargs["moe_d_ff"] assignment below is the REMOVED line and the .get(...) form
# is its replacement; only the .get(...) line exists in the merged file.
# Purpose of the change: the old code raised KeyError when neither moe_d_ff nor
# d_ff was passed; the new code falls back to the declared default of d_ff
# (via pydantic-style model_fields) -- presumably a pydantic v2 model; confirm.
def __init__(self, **kwargs):
if "moe_d_ff" not in kwargs:
# If moe_d_ff is not provided, default to d_ff.
kwargs["moe_d_ff"] = kwargs["d_ff"]
kwargs["moe_d_ff"] = kwargs.get("d_ff", self.model_fields["d_ff"].default)
super().__init__(**kwargs)

def __hash__(self) -> int:
Expand Down
97 changes: 97 additions & 0 deletions neusim/configs/tests/test_chip_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import unittest
from neusim.configs.chips.ChipConfig import ChipConfig

class TestChipConfig(unittest.TestCase):
    """Sanity-check every derived (property) value of a default ChipConfig."""

    def test_chip_config_properties(self):
        cfg = ChipConfig()

        # Vmem bandwidth: num_vu_ports(6) * freq_GHz(1.75) * 8 * 128 * 4.
        self.assertAlmostEqual(cfg.vmem_bw_GBps, 43008.0)

        # HBM static power = memory-controller + PHY contributions.
        hbm_static = 10.264041296 + 15.396061944
        self.assertAlmostEqual(cfg.static_power_hbm_W, hbm_static)

        # Peak systolic-array TFLOPS: num_sa(8) * sa_dim(128)^2 * 2 * 1.75 GHz.
        sa_tflops = 8 * 128 ** 2 * 2 * 1.75 / 1000
        self.assertAlmostEqual(cfg.peak_SA_tflops_per_sec, sa_tflops)

        # Peak vector-unit TFLOPS: num_vu(6) * (8 * 128) lanes * 1.75 GHz.
        vu_tflops = 6 * 8 * 128 * 1.75 / 1000
        self.assertAlmostEqual(cfg.peak_VU_tflops_per_sec, vu_tflops)

        # Chip peak is SA peak + VU peak.
        self.assertAlmostEqual(cfg.peak_tflops_per_sec, sa_tflops + vu_tflops)

        # Per-unit static power scaled by unit counts.
        sa_static = 1.35868996 * 8
        self.assertAlmostEqual(cfg.static_power_sa_W, sa_static)
        vu_static = 0.475076728 * 6
        self.assertAlmostEqual(cfg.static_power_vu_W, vu_static)

        # Vmem static power per MB: total 24.21353615 W over 128 MB.
        self.assertAlmostEqual(cfg.static_power_vmem_W_per_MB, 24.21353615 / 128)

        # Total static power sums all component contributions.
        static_total = (
            sa_static
            + vu_static
            + cfg.static_power_vmem_W
            + cfg.static_power_ici_W
            + hbm_static
            + cfg.static_power_other_W
        )
        self.assertAlmostEqual(cfg.static_power_W, static_total)

        # Idle power equals the static power.
        self.assertAlmostEqual(cfg.idle_power_W, static_total)

        # Per-unit dynamic power scaled by unit counts / bandwidths.
        sa_dynamic = 28.19413333 * 8
        self.assertAlmostEqual(cfg.dynamic_power_sa_W, sa_dynamic)
        vu_dynamic = 2.65216 * 6
        self.assertAlmostEqual(cfg.dynamic_power_vu_W, vu_dynamic)
        hbm_dynamic = 2765 * 0.01261538462  # hbm_bw_GBps * W per GBps
        self.assertAlmostEqual(cfg.dynamic_power_hbm_W, hbm_dynamic)
        ici_dynamic = 200 * 0.01767315271  # ici_bw_GBps * W per GBps
        self.assertAlmostEqual(cfg.dynamic_power_ici_W, ici_dynamic)

        # Peak dynamic power sums all component contributions.
        dynamic_total = (
            sa_dynamic
            + vu_dynamic
            + cfg.dynamic_power_vmem_W
            + ici_dynamic
            + hbm_dynamic
            + cfg.dynamic_power_other_W
        )
        self.assertAlmostEqual(cfg.dynamic_power_peak_W, dynamic_total)

        # Peak total power = static + peak dynamic.
        self.assertAlmostEqual(cfg.total_power_peak_W, static_total + dynamic_total)
17 changes: 17 additions & 0 deletions neusim/configs/tests/test_dit_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import unittest
from neusim.configs.models.DiTConfig import DiTConfig

class TestDiTConfig(unittest.TestCase):
    """Basic construction test for DiTConfig."""

    def test_dit_config_instantiation(self):
        cfg = DiTConfig(
            image_width=256,
            num_channels=3,
            patch_size=16,
            num_diffusion_steps=1000,
        )

        # Constructor arguments are stored verbatim; model_type and
        # input_seqlen come from the class defaults.
        for attr, expected in (
            ("model_type", "dit"),
            ("image_width", 256),
            ("num_channels", 3),
            ("input_seqlen", 0),
        ):
            self.assertEqual(getattr(cfg, attr), expected)
23 changes: 23 additions & 0 deletions neusim/configs/tests/test_dlrm_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import unittest
from neusim.configs.models.DLRMConfig import DLRMConfig, MLPLayerConfig

class TestDLRMConfig(unittest.TestCase):
    """Basic construction test for DLRMConfig with nested MLP layer configs."""

    def test_dlrm_config_instantiation(self):
        bottom = [MLPLayerConfig(in_features=10, out_features=20)]
        top = [MLPLayerConfig(in_features=20, out_features=1)]

        cfg = DLRMConfig(
            model_type="dlrm",
            embedding_dim=64,
            num_indices_per_lookup=[100, 200],
            embedding_table_sizes=[1000, 2000],
            num_dense_features=10,
            bottom_mlp_config=bottom,
            top_mlp_config=top,
        )

        self.assertEqual(cfg.model_type, "dlrm")
        self.assertEqual(cfg.embedding_dim, 64)
        # The nested bottom-MLP layer list is preserved as given.
        self.assertEqual(len(cfg.bottom_mlp_config), 1)
        self.assertEqual(cfg.bottom_mlp_config[0].in_features, 10)
18 changes: 18 additions & 0 deletions neusim/configs/tests/test_gligen_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import unittest
from neusim.configs.models.GLIGENConfig import GLIGENConfig

class TestGLIGENConfig(unittest.TestCase):
    """Default values of GLIGENConfig, including its nested sub-configs."""

    def test_gligen_config_instantiation(self):
        cfg = GLIGENConfig()

        self.assertEqual(cfg.model_type, "gligen")
        self.assertEqual(cfg.num_diffusion_steps, 1)

        # Spot-check one representative default from each nested sub-config.
        self.assertEqual(cfg.fourier_embedder_config.num_freqs, 64)
        self.assertEqual(cfg.text_embedder_config.d_model, 512)
        self.assertEqual(cfg.image_embedder_config.d_model, 1024)
        self.assertEqual(cfg.spatial_condition_embedder_config.stem.in_channels, 3)
        self.assertEqual(cfg.grounding_input_config.text.input_seqlen, 512)
        self.assertEqual(cfg.unet_config.model_channels, 320)
88 changes: 88 additions & 0 deletions neusim/configs/tests/test_llm_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
import unittest
from neusim.configs.models.LLMConfig import LLMConfig, MoELLMConfig, DeepSeekConfig

class TestLLMConfig(unittest.TestCase):
    """Defaulting and hashing behavior of the base LLMConfig."""

    def test_llm_config(self):
        cfg = LLMConfig()
        # num_kv_heads falls back to num_heads (default 64) when not given.
        self.assertEqual(cfg.num_heads, 64)
        self.assertEqual(cfg.num_kv_heads, 64)

        # An explicit num_kv_heads (MQA-style) is honored.
        self.assertEqual(LLMConfig(num_kv_heads=1).num_kv_heads, 1)

        # Two identically-constructed configs hash identically.
        self.assertEqual(hash(LLMConfig()), hash(LLMConfig()))

class TestMoELLMConfig(unittest.TestCase):
    """Defaulting, derived parallelism degrees, and hashing of MoELLMConfig."""

    def test_moe_llm_config(self):
        cfg = MoELLMConfig()

        # moe_d_ff defaults to d_ff (11008) when not given explicitly.
        self.assertEqual(cfg.d_ff, 11008)
        self.assertEqual(cfg.moe_d_ff, 11008)

        # An explicit moe_d_ff overrides the default.
        self.assertEqual(MoELLMConfig(moe_d_ff=2048).moe_d_ff, 2048)

        # expert_tensor_parallelism_degree = dp * tp // ep.
        self.assertEqual(cfg.expert_tensor_parallelism_degree, 1)  # 1*1 // 1
        parallel = MoELLMConfig(
            data_parallelism_degree=2,
            tensor_parallelism_degree=2,
            expert_parallelism_degree=2,
        )
        self.assertEqual(parallel.expert_tensor_parallelism_degree, 2)  # 2*2 // 2

        # num_expert_tensor_parallel_axes = ndp + ntp - nep.
        self.assertEqual(cfg.num_expert_tensor_parallel_axes, 1)  # 1+1-1
        axes = MoELLMConfig(
            num_data_parallel_axes=2,
            num_tensor_parallel_axes=2,
            num_expert_parallel_axes=2,
        )
        self.assertEqual(axes.num_expert_tensor_parallel_axes, 2)  # 2+2-2

        # Experts per token = shared experts (1) + routed experts (8).
        self.assertEqual(cfg.num_experts_per_token, 9)

        # Two identically-constructed configs hash identically.
        self.assertEqual(hash(MoELLMConfig()), hash(MoELLMConfig()))

class TestDeepSeekConfig(unittest.TestCase):
    """Derived head dimension and hashing for DeepSeekConfig."""

    def test_deepseek_config(self):
        kwargs = dict(
            kv_lora_rank=16,
            q_lora_rank=32,
            qk_rope_head_dim=64,
            qk_nope_head_dim=64,
            v_head_dim=128,
        )
        cfg = DeepSeekConfig(**kwargs)

        # qk_head_dim is the sum of the rope (64) and nope (64) head dims.
        self.assertEqual(cfg.qk_head_dim, 128)

        # Two configs built from the same kwargs hash identically.
        self.assertEqual(hash(cfg), hash(DeepSeekConfig(**kwargs)))
35 changes: 35 additions & 0 deletions neusim/configs/tests/test_model_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import unittest
from neusim.configs.models.ModelConfig import ModelConfig

class TestModelConfig(unittest.TestCase):
    """Hashing behavior of ModelConfig."""

    @staticmethod
    def _make(model_name):
        # Build a ModelConfig varying only in model_name.
        return ModelConfig(
            model_type="llm",
            model_name=model_name,
            name="test_chip",
            global_batch_size=8,
            num_chips=4,
        )

    def test_model_config_hash(self):
        # Configs with identical field values hash identically.
        self.assertEqual(
            hash(self._make("test_model")), hash(self._make("test_model"))
        )

        # A differing field value changes the hash (with overwhelming likelihood).
        self.assertNotEqual(
            hash(self._make("test_model")), hash(self._make("test_model_diff"))
        )
16 changes: 16 additions & 0 deletions neusim/configs/tests/test_system_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import unittest
from neusim.configs.systems.SystemConfig import SystemConfig

class TestSystemConfig(unittest.TestCase):
    """Default and explicit field values of SystemConfig."""

    def test_system_config_instantiation(self):
        # Defaults.
        cfg = SystemConfig()
        self.assertEqual(cfg.PUE, 1.1)
        self.assertEqual(cfg.carbon_intensity_kgCO2_per_kWh, 0.5)

        # Explicit overrides are stored verbatim.
        custom = SystemConfig(PUE=1.2, carbon_intensity_kgCO2_per_kWh=0.6)
        self.assertEqual(custom.PUE, 1.2)
        self.assertEqual(custom.carbon_intensity_kgCO2_per_kWh, 0.6)
Loading