From 96f295bc9f22a40b7311e6e2007b52de2e096771 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Wed, 11 Feb 2026 13:34:43 +0530 Subject: [PATCH 01/35] Added config management integration with guardrails --- backend/app/core/config.py | 1 + backend/app/services/llm/guardrails_config.py | 78 +++++++++++++++++++ backend/app/services/llm/jobs.py | 7 +- backend/app/tests/services/llm/test_jobs.py | 34 ++++++-- 4 files changed, 113 insertions(+), 7 deletions(-) create mode 100644 backend/app/services/llm/guardrails_config.py diff --git a/backend/app/core/config.py b/backend/app/core/config.py index a7cb7376..c2d20993 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -51,6 +51,7 @@ class Settings(BaseSettings): POSTGRES_DB: str = "" KAAPI_GUARDRAILS_AUTH: str = "" KAAPI_GUARDRAILS_URL: str = "" + KAAPI_GUARDRAILS_CONFIG_URL: str = "" @computed_field # type: ignore[prop-decorator] @property diff --git a/backend/app/services/llm/guardrails_config.py b/backend/app/services/llm/guardrails_config.py new file mode 100644 index 00000000..8e3113bf --- /dev/null +++ b/backend/app/services/llm/guardrails_config.py @@ -0,0 +1,78 @@ +import logging + +import httpx + +from app.core.config import settings + +logger = logging.getLogger(__name__) + +def fetch_guardrails_config( + organization_id: int, project_id: int +) -> tuple[list[dict], list[dict]]: + """ + Fetch guardrail validators and split them into input/output by stage. + + Args: + organization_id: Organization id + project_id: Project id + + Retruns: + List of validators for the given organization id and project id. + """ + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", + } + + try: + with httpx.Client(timeout=10.0) as client: + response = client.get( + settings.KAAPI_GUARDRAILS_CONFIG_URL, + params={ + "organization_id": organization_id, + "project_id": project_id, + }, + headers=headers, + ) + + response.raise_for_status() + payload = response.json() + + logger.info(f"Added payload: {payload}") + + if not payload.get("success"): + logger.warning( + "[fetch_guardrails_config] Guardrails config API returned unsuccessful response. " + f"organization_id={organization_id}, project_id={project_id}" + ) + return [], [] + + validators = payload.get("data") + if not isinstance(validators, list): + logger.warning( + "[fetch_guardrails_config] Guardrails config response has invalid `data` format. " + f"organization_id={organization_id}, project_id={project_id}" + ) + return [], [] + + input_guardrails = [ + validator + for validator in validators + if isinstance(validator, dict) + and str(validator.get("stage", "")).lower() == "input" + ] + output_guardrails = [ + validator + for validator in validators + if isinstance(validator, dict) + and str(validator.get("stage", "")).lower() == "output" + ] + return input_guardrails, output_guardrails + + except Exception as e: + logger.warning( + "[fetch_guardrails_config] Failed to fetch guardrails config. 
" + f"organization_id={organization_id}, project_id={project_id}, error={e}" + ) + return [], [] diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index 492c1dc2..a65d6262 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -14,6 +14,7 @@ from app.models import JobStatus, JobType, JobUpdate, LLMCallRequest from app.models.llm.request import ConfigBlob, LLMCallConfig, KaapiCompletionConfig from app.services.llm.guardrails import call_guardrails +from app.services.llm.guardrails_config import fetch_guardrails_config from app.services.llm.providers.registry import get_llm_provider from app.services.llm.mappers import transform_kaapi_config_to_native from app.utils import APIResponse, send_callback @@ -136,8 +137,10 @@ def execute_job( # one of (id, version) or blob is guaranteed to be present due to prior validation config = request.config input_query = request.query.input - input_guardrails = request.input_guardrails - output_guardrails = request.output_guardrails + input_guardrails, output_guardrails = fetch_guardrails_config( + organization_id=organization_id, + project_id=project_id, + ) callback_response = None config_blob: ConfigBlob | None = None diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index bae15c26..3daadad2 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -255,6 +255,10 @@ def job_env(self, db, mock_llm_response): patch("app.services.llm.jobs.Session") as mock_session_class, patch("app.services.llm.jobs.get_llm_provider") as mock_get_provider, patch("app.services.llm.jobs.send_callback") as mock_send_callback, + patch( + "app.services.llm.jobs.fetch_guardrails_config", + return_value=([], []), + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -761,7 +765,11 @@ def test_guardrails_sanitize_input_before_provider( "callback_url": None, } - result = self._execute_job(job_for_execution, db, request_data) + with patch( + "app.services.llm.jobs.fetch_guardrails_config", + return_value=([{"type": "pii_remover"}], []), + ): + result = self._execute_job(job_for_execution, db, request_data) provider_query = env["provider"].execute.call_args[0][1] assert "[REDACTED]" in provider_query.input @@ -801,7 +809,11 @@ def test_guardrails_sanitize_output_after_provider( "output_guardrails": [{"type": "pii_remover"}], } - result = self._execute_job(job_for_execution, db, request_data) + with patch( + "app.services.llm.jobs.fetch_guardrails_config", + return_value=([], [{"type": "pii_remover"}]), + ): + result = self._execute_job(job_for_execution, db, request_data) assert "REDACTED" in result["data"]["response"]["output"]["text"] @@ -837,7 +849,11 @@ def test_guardrails_bypass_does_not_modify_input( "input_guardrails": [{"type": "pii_remover"}], } - self._execute_job(job_for_execution, db, request_data) + with patch( + "app.services.llm.jobs.fetch_guardrails_config", + return_value=([{"type": "pii_remover"}], []), + ): + self._execute_job(job_for_execution, db, request_data) provider_query = env["provider"].execute.call_args[0][1] assert provider_query.input == unsafe_input @@ -866,7 +882,11 @@ def test_guardrails_validation_failure_blocks_job( "input_guardrails": [{"type": "uli_slur_match"}], } - result = self._execute_job(job_for_execution, db, request_data) + with patch( + "app.services.llm.jobs.fetch_guardrails_config", + 
return_value=([{"type": "uli_slur_match"}], []), + ): + result = self._execute_job(job_for_execution, db, request_data) assert not result["success"] assert "Unsafe content" in result["error"] @@ -900,7 +920,11 @@ def test_guardrails_rephrase_needed_blocks_job( "input_guardrails": [{"type": "policy"}], } - result = self._execute_job(job_for_execution, db, request_data) + with patch( + "app.services.llm.jobs.fetch_guardrails_config", + return_value=([{"type": "policy"}], []), + ): + result = self._execute_job(job_for_execution, db, request_data) assert not result["success"] env["provider"].execute.assert_not_called() From acaf633e42a7980f77a366d13ad293454ea7208e Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Thu, 12 Feb 2026 20:37:43 +0530 Subject: [PATCH 02/35] Added guardrails in config and version API --- ..._guardrails_config_id_to_config_version.py | 32 ++++++ backend/app/api/routes/config/config.py | 4 +- backend/app/api/routes/config/version.py | 6 +- backend/app/crud/config/config.py | 25 ++++- backend/app/crud/config/version.py | 20 +++- backend/app/models/config/version.py | 6 ++ backend/app/models/llm/request.py | 31 +++--- backend/app/services/llm/guardrails.py | 102 +++++++++++++++++- 8 files changed, 198 insertions(+), 28 deletions(-) create mode 100644 backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py diff --git a/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py b/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py new file mode 100644 index 00000000..67704925 --- /dev/null +++ b/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py @@ -0,0 +1,32 @@ +"""Add guardrails_config_id to config_version + +Revision ID: 046 +Revises: 045 +Create Date: 2026-02-12 12:30:00.000000 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "046" +down_revision = "045" +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column( + "config_version", + sa.Column( + "guardrails_config_id", + sa.Uuid(), + nullable=True, + comment="Reference to the kaapi_guardrails validator configuration", + ), + ) + + +def downgrade(): + op.drop_column("config_version", "guardrails_config_id") diff --git a/backend/app/api/routes/config/config.py b/backend/app/api/routes/config/config.py index 6d262944..478505ce 100644 --- a/backend/app/api/routes/config/config.py +++ b/backend/app/api/routes/config/config.py @@ -33,7 +33,9 @@ def create_config( """ create new config along with initial version """ - config_crud = ConfigCrud(session=session, project_id=current_user.project_.id) + project_id = current_user.project_.id + organization_id = current_user.organization_.id + config_crud = ConfigCrud(session, project_id, organization_id) config, version = config_crud.create_or_raise(config_create) response = ConfigWithVersion(**config.model_dump(), version=version) diff --git a/backend/app/api/routes/config/version.py b/backend/app/api/routes/config/version.py index 5f3e8626..292742cb 100644 --- a/backend/app/api/routes/config/version.py +++ b/backend/app/api/routes/config/version.py @@ -32,9 +32,9 @@ def create_version( Create a new version for an existing configuration. The version number is automatically incremented. 
""" - version_crud = ConfigVersionCrud( - session=session, project_id=current_user.project_.id, config_id=config_id - ) + project_id = current_user.project_.id + organization_id = current_user.organization_.id + version_crud = ConfigVersionCrud(session, config_id, project_id, organization_id) version = version_crud.create_or_raise(version_create=version_create) return APIResponse.success_response( diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py index 69d4bced..37e8c3e2 100644 --- a/backend/app/crud/config/config.py +++ b/backend/app/crud/config/config.py @@ -1,6 +1,6 @@ import logging -from uuid import UUID -from typing import Tuple +from uuid import UUID, uuid4 +from typing import Optional, Tuple from sqlmodel import Session, select, and_ from fastapi import HTTPException @@ -12,6 +12,7 @@ ConfigVersion, ) from app.core.util import now +from app.services.llm.guardrails import create_guardrails_validators_if_present logger = logging.getLogger(__name__) @@ -21,9 +22,10 @@ class ConfigCrud: CRUD operations for configurations scoped to a project. """ - def __init__(self, session: Session, project_id: int): + def __init__(self, session: Session, project_id: int, organization_id: Optional[int] = None): self.session = session self.project_id = project_id + self.organization_id = organization_id def create_or_raise( self, config_create: ConfigCreate @@ -34,6 +36,7 @@ def create_or_raise( self._check_unique_name_or_raise(config_create.name) try: + guardrails_config_id=uuid4() config = Config( name=config_create.name, description=config_create.description, @@ -43,12 +46,24 @@ def create_or_raise( self.session.add(config) self.session.flush() # Flush to get the config.id - # Create the initial version + config_blob = config_create.config_blob.model_dump( + exclude={"guardrails"} + ) + create_guardrails_validators_if_present( + guardrails=config_create.config_blob.guardrails, + guardrails_config_id=guardrails_config_id, + organization_id=self.organization_id, + project_id=self.project_id, + ) + + # Create the initial version. Guardrails are stored externally via + # guardrails_config_id and should not be persisted in config_blob. version = ConfigVersion( config_id=config.id, version=1, - config_blob=config_create.config_blob.model_dump(), + config_blob=config_blob, commit_message=config_create.commit_message, + guardrails_config_id=guardrails_config_id, ) self.session.add(version) diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py index f834c168..df449cbd 100644 --- a/backend/app/crud/config/version.py +++ b/backend/app/crud/config/version.py @@ -1,5 +1,6 @@ import logging -from uuid import UUID +from typing import Optional +from uuid import UUID, uuid4 from sqlmodel import Session, select, and_, func from fastapi import HTTPException @@ -8,6 +9,7 @@ from .config import ConfigCrud from app.core.util import now from app.models import Config, ConfigVersion, ConfigVersionCreate, ConfigVersionItems +from app.services.llm.guardrails import create_guardrails_validators_if_present logger = logging.getLogger(__name__) @@ -17,10 +19,11 @@ class ConfigVersionCrud: CRUD operations for configuration versions scoped to a project. 
""" - def __init__(self, session: Session, config_id: UUID, project_id: int): + def __init__(self, session: Session, config_id: UUID, project_id: int, organization_id: Optional[int] = None): self.session = session self.project_id = project_id self.config_id = config_id + self.organization_id = organization_id def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: """ @@ -30,12 +33,23 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: self._config_exists_or_raise(self.config_id) try: next_version = self._get_next_version(self.config_id) + guardrails_config_id=uuid4() version = ConfigVersion( config_id=self.config_id, version=next_version, - config_blob=version_create.config_blob.model_dump(), + config_blob=version_create.config_blob.model_dump( + exclude={"guardrails"} + ), commit_message=version_create.commit_message, + guardrails_config_id=guardrails_config_id + ) + + create_guardrails_validators_if_present( + guardrails=version_create.config_blob.guardrails, + guardrails_config_id=guardrails_config_id, + organization_id=self.organization_id, + project_id=self.project_id, ) self.session.add(version) diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py index 5a374582..988e5c74 100644 --- a/backend/app/models/config/version.py +++ b/backend/app/models/config/version.py @@ -62,6 +62,12 @@ class ConfigVersion(ConfigVersionBase, table=True): ondelete="CASCADE", sa_column_kwargs={"comment": "Reference to the parent configuration"}, ) + + guardrails_config_id: UUID = Field( + nullable=True, + sa_column_kwargs={"comment": "Reference to the kaapi_guardrails validator configuration"}, + ) + version: int = Field( nullable=False, description="Version number starting at 1", diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index 4da892e9..88252059 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -119,11 +119,28 @@ class KaapiCompletionConfig(SQLModel): Field(discriminator="provider"), ] +class GuardrailsConfig(SQLModel): + """Guardrails configuration applied during validation.""" + + input: list[dict[str, Any]] | None = Field( + default=None, + description="Guardrails applied to validate/sanitize the input before the LLM call", + ) + + output: list[dict[str, Any]] | None = Field( + default=None, + description="Guardrails applied to validate/sanitize the output after the LLM call", + ) class ConfigBlob(SQLModel): """Raw JSON blob of config.""" completion: CompletionConfig = Field(..., description="Completion configuration") + + guardrails: GuardrailsConfig | None = Field( + default=None, + description="Optional guardrails configuration for input/output validation", + ) # Future additions: # classifier: ClassifierConfig | None = None # pre_filter: PreFilterConfig | None = None @@ -208,20 +225,6 @@ class LLMCallRequest(SQLModel): "in production, always use the id + version." ), ) - input_guardrails: list[dict[str, Any]] | None = Field( - default=None, - description=( - "Optional guardrails configuration to apply input validation. " - "If not provided, no guardrails will be applied." - ), - ) - output_guardrails: list[dict[str, Any]] | None = Field( - default=None, - description=( - "Optional guardrails configuration to apply output validation. " - "If not provided, no guardrails will be applied." 
- ), - ) callback_url: HttpUrl | None = Field( default=None, description="Webhook URL for async response delivery" ) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 37f0d1eb..414e7d3d 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -5,11 +5,12 @@ import httpx from app.core.config import settings +from app.models.llm.request import GuardrailsConfig logger = logging.getLogger(__name__) -def call_guardrails( +def run_guardrails_validation( input_text: str, guardrail_config: list[dict], job_id: UUID ) -> dict[str, Any]: """ @@ -47,7 +48,7 @@ def call_guardrails( return response.json() except Exception as e: logger.warning( - f"[call_guardrails] Service unavailable. Bypassing guardrails. job_id={job_id}. error={e}" + f"[run_guardrails_validation] Service unavailable. Bypassing guardrails. job_id={job_id}. error={e}" ) return { @@ -58,3 +59,100 @@ def call_guardrails( "rephrase_needed": False, }, } + + +def create_validators_batch( + validators: list[dict[str, Any]], + config_id: UUID | None, + organization_id: int | None, + project_id: int | None, +) -> list[dict[str, Any]]: + """ + Batch create validator configs via Kaapi Guardrails service. + + Args: + validators: List of validator creation payloads + config_id: Optional config UUID associated with this batch + + Returns: + List of created validator objects (includes UUIDs) + """ + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", + "Content-Type": "application/json", + } + + try: + payload: dict[str, Any] | list[dict] + + if config_id is None: + raise ValueError( + "config_id must be provided" + ) + + payload = { + "config_id": str(config_id) if config_id is not None else None, + "validators": validators, + } + + logging.info( + f"[create_validators_batch] payload: {payload}" + ) + + with httpx.Client(timeout=10.0) as client: + response = client.post( + f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/batch", + params={ + "organization_id": organization_id, + "project_id": project_id, + }, + json=payload, + headers=headers, + ) + + response.raise_for_status() + + data = response.json() + + return data["data"] + + except Exception as e: + logger.error( + f"[create_validators_batch] Failed to create validators. 
error={e}" + ) + raise + + +def build_staged_validators( + guardrails: GuardrailsConfig | None, +) -> list[dict[str, Any]]: + validators: list[dict[str, Any]] = [] + if guardrails is None: + return validators + + for validator in guardrails.input or []: + validators.append({"stage": "input", **validator}) + for validator in guardrails.output or []: + validators.append({"stage": "output", **validator}) + + return validators + + +def create_guardrails_validators_if_present( + guardrails: GuardrailsConfig | None, + guardrails_config_id: UUID, + organization_id: int | None, + project_id: int | None, +) -> None: + validators = build_staged_validators(guardrails) + if not validators: + return + + create_validators_batch( + validators=validators, + config_id=guardrails_config_id, + organization_id=organization_id, + project_id=project_id, + ) From c4abc0a9c2db6e3a783837ba24fc0c4c0961cd7b Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Thu, 12 Feb 2026 21:46:07 +0530 Subject: [PATCH 03/35] fixed integration with jobs --- backend/app/services/llm/guardrails.py | 63 +++++++++++++ backend/app/services/llm/guardrails_config.py | 78 ---------------- backend/app/services/llm/jobs.py | 92 +++++++++++-------- 3 files changed, 119 insertions(+), 114 deletions(-) delete mode 100644 backend/app/services/llm/guardrails_config.py diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 414e7d3d..d1f579fb 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -125,6 +125,69 @@ def create_validators_batch( raise +def get_validators_config( + config_id: UUID | str, + organization_id: int | None, + project_id: int | None, +) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: + """ + Fetch validator configuration for a specific config id and split by stage. + + Calls: + GET /validators/configs/{config_id}?organization_id={organization_id}&project_id={project_id} + """ + headers = { + "accept": "application/json", + "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", + } + + endpoint = ( + f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/{config_id}" + ) + + try: + with httpx.Client(timeout=10.0) as client: + response = client.get( + endpoint, + params={ + "organization_id": organization_id, + "project_id": project_id, + }, + headers=headers, + ) + response.raise_for_status() + + payload = response.json() + validators = payload.get("data", []) if isinstance(payload, dict) else [] + + if not isinstance(validators, list): + raise ValueError( + "Invalid validators response format: `data` must be a list." + ) + + input_guardrails = [ + validator + for validator in validators + if isinstance(validator, dict) + and str(validator.get("stage", "")).lower() == "input" + ] + output_guardrails = [ + validator + for validator in validators + if isinstance(validator, dict) + and str(validator.get("stage", "")).lower() == "output" + ] + + return input_guardrails, output_guardrails + + except Exception as e: + logger.error( + "[get_validators_config] Failed to fetch validator config. 
" + f"config_id={config_id}, organization_id={organization_id}, project_id={project_id}, error={e}" + ) + raise + + def build_staged_validators( guardrails: GuardrailsConfig | None, ) -> list[dict[str, Any]]: diff --git a/backend/app/services/llm/guardrails_config.py b/backend/app/services/llm/guardrails_config.py deleted file mode 100644 index 8e3113bf..00000000 --- a/backend/app/services/llm/guardrails_config.py +++ /dev/null @@ -1,78 +0,0 @@ -import logging - -import httpx - -from app.core.config import settings - -logger = logging.getLogger(__name__) - -def fetch_guardrails_config( - organization_id: int, project_id: int -) -> tuple[list[dict], list[dict]]: - """ - Fetch guardrail validators and split them into input/output by stage. - - Args: - organization_id: Organization id - project_id: Project id - - Retruns: - List of validators for the given organization id and project id. - """ - - headers = { - "accept": "application/json", - "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", - } - - try: - with httpx.Client(timeout=10.0) as client: - response = client.get( - settings.KAAPI_GUARDRAILS_CONFIG_URL, - params={ - "organization_id": organization_id, - "project_id": project_id, - }, - headers=headers, - ) - - response.raise_for_status() - payload = response.json() - - logger.info(f"Added payload: {payload}") - - if not payload.get("success"): - logger.warning( - "[fetch_guardrails_config] Guardrails config API returned unsuccessful response. " - f"organization_id={organization_id}, project_id={project_id}" - ) - return [], [] - - validators = payload.get("data") - if not isinstance(validators, list): - logger.warning( - "[fetch_guardrails_config] Guardrails config response has invalid `data` format. " - f"organization_id={organization_id}, project_id={project_id}" - ) - return [], [] - - input_guardrails = [ - validator - for validator in validators - if isinstance(validator, dict) - and str(validator.get("stage", "")).lower() == "input" - ] - output_guardrails = [ - validator - for validator in validators - if isinstance(validator, dict) - and str(validator.get("stage", "")).lower() == "output" - ] - return input_guardrails, output_guardrails - - except Exception as e: - logger.warning( - "[fetch_guardrails_config] Failed to fetch guardrails config. 
" - f"organization_id={organization_id}, project_id={project_id}, error={e}" - ) - return [], [] diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index a65d6262..24d92ece 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -13,8 +13,7 @@ from app.crud.jobs import JobCrud from app.models import JobStatus, JobType, JobUpdate, LLMCallRequest from app.models.llm.request import ConfigBlob, LLMCallConfig, KaapiCompletionConfig -from app.services.llm.guardrails import call_guardrails -from app.services.llm.guardrails_config import fetch_guardrails_config +from app.services.llm.guardrails import get_validators_config, run_guardrails_validation from app.services.llm.providers.registry import get_llm_provider from app.services.llm.mappers import transform_kaapi_config_to_native from app.utils import APIResponse, send_callback @@ -94,6 +93,19 @@ def resolve_config_blob( """ try: config_version = config_crud.exists_or_raise(version_number=config.version) + config_blob_data = dict(config_version.config_blob) + + if config_version.guardrails_config_id: + input_guardrails, output_guardrails = get_validators_config( + config_id=config_version.guardrails_config_id, + organization_id=config_crud.organization_id, + project_id=config_crud.project_id, + ) + config_blob_data["guardrails"] = { + "input": input_guardrails, + "output": output_guardrails, + } + except HTTPException as e: return None, f"Failed to retrieve stored configuration: {e.detail}" except Exception: @@ -105,7 +117,8 @@ def resolve_config_blob( return None, "Unexpected error occurred while retrieving stored configuration" try: - return ConfigBlob(**config_version.config_blob), None + config_blob, error = ConfigBlob(**config_blob_data), None + return config_blob, error except (TypeError, ValueError) as e: return None, f"Stored configuration blob is invalid: {str(e)}" except Exception: @@ -137,10 +150,6 @@ def execute_job( # one of (id, version) or blob is guaranteed to be present due to prior validation config = request.config input_query = request.query.input - input_guardrails, output_guardrails = fetch_guardrails_config( - organization_id=organization_id, - project_id=project_id, - ) callback_response = None config_blob: ConfigBlob | None = None @@ -149,8 +158,46 @@ def execute_job( ) try: + with Session(engine) as session: + # Update job status to PROCESSING + job_crud = JobCrud(session=session) + job_crud.update( + job_id=job_id, job_update=JobUpdate(status=JobStatus.PROCESSING) + ) + + # if stored config, fetch blob from DB + if config.is_stored_config: + config_crud = ConfigVersionCrud( + session=session, project_id=project_id, config_id=config.id, organization_id=organization_id + ) + + # blob is dynamic, need to resolve to ConfigBlob format + config_blob, error = resolve_config_blob(config_crud, config) + + if error: + callback_response = APIResponse.failure_response( + error=error, + metadata=request.request_metadata, + ) + return handle_job_error( + job_id, request.callback_url, callback_response + ) + if config_blob and config_blob.guardrails: + input_guardrails = config_blob.guardrails.input or [] + output_guardrails = config_blob.guardrails.output or [] + + else: + config_blob = config.blob + + if config_blob is not None and config_blob.guardrails is not None: + input_guardrails = config_blob.guardrails.input or [] + output_guardrails = config_blob.guardrails.output or [] + else: + input_guardrails = [] + output_guardrails = [] + if input_guardrails: - safe_input 
= call_guardrails(input_query, input_guardrails, job_id) + safe_input = run_guardrails_validation(input_query, input_guardrails, job_id) logger.info( f"[execute_job] Input guardrail validation | success={safe_input['success']}." @@ -179,33 +226,6 @@ def execute_job( ) return handle_job_error(job_id, request.callback_url, callback_response) - with Session(engine) as session: - # Update job status to PROCESSING - job_crud = JobCrud(session=session) - job_crud.update( - job_id=job_id, job_update=JobUpdate(status=JobStatus.PROCESSING) - ) - - # if stored config, fetch blob from DB - if config.is_stored_config: - config_crud = ConfigVersionCrud( - session=session, project_id=project_id, config_id=config.id - ) - - # blob is dynamic, need to resolve to ConfigBlob format - config_blob, error = resolve_config_blob(config_crud, config) - - if error: - callback_response = APIResponse.failure_response( - error=error, - metadata=request.request_metadata, - ) - return handle_job_error( - job_id, request.callback_url, callback_response - ) - - else: - config_blob = config.blob try: # Transform Kaapi config to native config if needed (before getting provider) @@ -265,7 +285,7 @@ def execute_job( if response: if output_guardrails: output_text = response.response.output.text - safe_output = call_guardrails(output_text, output_guardrails, job_id) + safe_output = run_guardrails_validation(output_text, output_guardrails, job_id) logger.info( f"[execute_job] Output guardrail validation | success={safe_output['success']}." From 82a93ff69e50039c7cfd288a8f12e8b02de4c473 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Thu, 12 Feb 2026 21:46:53 +0530 Subject: [PATCH 04/35] removed env var --- backend/app/core/config.py | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/app/core/config.py b/backend/app/core/config.py index c2d20993..a7cb7376 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -51,7 +51,6 @@ class Settings(BaseSettings): POSTGRES_DB: str = "" KAAPI_GUARDRAILS_AUTH: str = "" KAAPI_GUARDRAILS_URL: str = "" - KAAPI_GUARDRAILS_CONFIG_URL: str = "" @computed_field # type: ignore[prop-decorator] @property From 63edd8826270d1e6611ec02a5e473f247319969c Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Thu, 12 Feb 2026 21:48:33 +0530 Subject: [PATCH 05/35] removed redundant code --- backend/app/services/llm/jobs.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index 24d92ece..82ebb830 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -182,9 +182,6 @@ def execute_job( return handle_job_error( job_id, request.callback_url, callback_response ) - if config_blob and config_blob.guardrails: - input_guardrails = config_blob.guardrails.input or [] - output_guardrails = config_blob.guardrails.output or [] else: config_blob = config.blob From 5c5753757d0be1e8e02ca3e28419d0872a015fd1 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Thu, 12 Feb 2026 23:42:52 +0530 Subject: [PATCH 06/35] fixed tests --- backend/app/crud/config/config.py | 10 +- backend/app/crud/config/version.py | 12 +- backend/app/models/config/version.py | 4 +- backend/app/models/llm/request.py | 2 + backend/app/services/llm/guardrails.py | 14 +- backend/app/services/llm/jobs.py | 99 +++++++------ .../tests/api/routes/configs/test_config.py | 35 +++++ .../tests/api/routes/configs/test_version.py | 39 +++++ backend/app/tests/api/routes/test_llm.py | 33 +---- backend/app/tests/crud/config/test_config.py | 39 
++++- backend/app/tests/crud/config/test_version.py | 42 +++++- .../app/tests/services/llm/test_guardrails.py | 102 +++++++++---- backend/app/tests/services/llm/test_jobs.py | 135 +++++++++++------- 13 files changed, 396 insertions(+), 170 deletions(-) diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py index 37e8c3e2..6fd488d8 100644 --- a/backend/app/crud/config/config.py +++ b/backend/app/crud/config/config.py @@ -22,7 +22,9 @@ class ConfigCrud: CRUD operations for configurations scoped to a project. """ - def __init__(self, session: Session, project_id: int, organization_id: Optional[int] = None): + def __init__( + self, session: Session, project_id: int, organization_id: Optional[int] = None + ): self.session = session self.project_id = project_id self.organization_id = organization_id @@ -36,7 +38,7 @@ def create_or_raise( self._check_unique_name_or_raise(config_create.name) try: - guardrails_config_id=uuid4() + guardrails_config_id = uuid4() config = Config( name=config_create.name, description=config_create.description, @@ -46,9 +48,7 @@ def create_or_raise( self.session.add(config) self.session.flush() # Flush to get the config.id - config_blob = config_create.config_blob.model_dump( - exclude={"guardrails"} - ) + config_blob = config_create.config_blob.model_dump(exclude={"guardrails"}) create_guardrails_validators_if_present( guardrails=config_create.config_blob.guardrails, guardrails_config_id=guardrails_config_id, diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py index df449cbd..5f134fa3 100644 --- a/backend/app/crud/config/version.py +++ b/backend/app/crud/config/version.py @@ -19,7 +19,13 @@ class ConfigVersionCrud: CRUD operations for configuration versions scoped to a project. 
""" - def __init__(self, session: Session, config_id: UUID, project_id: int, organization_id: Optional[int] = None): + def __init__( + self, + session: Session, + config_id: UUID, + project_id: int, + organization_id: Optional[int] = None, + ): self.session = session self.project_id = project_id self.config_id = config_id @@ -33,7 +39,7 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: self._config_exists_or_raise(self.config_id) try: next_version = self._get_next_version(self.config_id) - guardrails_config_id=uuid4() + guardrails_config_id = uuid4() version = ConfigVersion( config_id=self.config_id, @@ -42,7 +48,7 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: exclude={"guardrails"} ), commit_message=version_create.commit_message, - guardrails_config_id=guardrails_config_id + guardrails_config_id=guardrails_config_id, ) create_guardrails_validators_if_present( diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py index 988e5c74..c11a1627 100644 --- a/backend/app/models/config/version.py +++ b/backend/app/models/config/version.py @@ -65,7 +65,9 @@ class ConfigVersion(ConfigVersionBase, table=True): guardrails_config_id: UUID = Field( nullable=True, - sa_column_kwargs={"comment": "Reference to the kaapi_guardrails validator configuration"}, + sa_column_kwargs={ + "comment": "Reference to the kaapi_guardrails validator configuration" + }, ) version: int = Field( diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index 88252059..dcdb6188 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -119,6 +119,7 @@ class KaapiCompletionConfig(SQLModel): Field(discriminator="provider"), ] + class GuardrailsConfig(SQLModel): """Guardrails configuration applied during validation.""" @@ -132,6 +133,7 @@ class GuardrailsConfig(SQLModel): description="Guardrails applied to validate/sanitize the output after the LLM call", ) + class ConfigBlob(SQLModel): """Raw JSON blob of config.""" diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index d1f579fb..9ed186d9 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -63,7 +63,7 @@ def run_guardrails_validation( def create_validators_batch( validators: list[dict[str, Any]], - config_id: UUID | None, + config_id: UUID | None, organization_id: int | None, project_id: int | None, ) -> list[dict[str, Any]]: @@ -88,18 +88,14 @@ def create_validators_batch( payload: dict[str, Any] | list[dict] if config_id is None: - raise ValueError( - "config_id must be provided" - ) + raise ValueError("config_id must be provided") payload = { "config_id": str(config_id) if config_id is not None else None, "validators": validators, } - logging.info( - f"[create_validators_batch] payload: {payload}" - ) + logging.info(f"[create_validators_batch] payload: {payload}") with httpx.Client(timeout=10.0) as client: response = client.post( @@ -141,9 +137,7 @@ def get_validators_config( "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", } - endpoint = ( - f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/{config_id}" - ) + endpoint = f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/{config_id}" try: with httpx.Client(timeout=10.0) as client: diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index 82ebb830..bcc29602 100644 --- a/backend/app/services/llm/jobs.py +++ 
b/backend/app/services/llm/jobs.py @@ -95,16 +95,26 @@ def resolve_config_blob( config_version = config_crud.exists_or_raise(version_number=config.version) config_blob_data = dict(config_version.config_blob) - if config_version.guardrails_config_id: - input_guardrails, output_guardrails = get_validators_config( - config_id=config_version.guardrails_config_id, - organization_id=config_crud.organization_id, - project_id=config_crud.project_id, - ) - config_blob_data["guardrails"] = { - "input": input_guardrails, - "output": output_guardrails, - } + if ( + config_version.guardrails_config_id + and config_crud.organization_id is not None + and config_crud.project_id is not None + ): + try: + input_guardrails, output_guardrails = get_validators_config( + config_id=config_version.guardrails_config_id, + organization_id=config_crud.organization_id, + project_id=config_crud.project_id, + ) + config_blob_data["guardrails"] = { + "input": input_guardrails, + "output": output_guardrails, + } + except Exception as e: + logger.warning( + f"[resolve_config_blob] Failed to fetch guardrails validators for config version. " + f"guardrails_config_id={config_version.guardrails_config_id}, error={e}" + ) except HTTPException as e: return None, f"Failed to retrieve stored configuration: {e.detail}" @@ -147,11 +157,11 @@ def execute_job( request = LLMCallRequest(**request_data) job_id: UUID = UUID(job_id) - # one of (id, version) or blob is guaranteed to be present due to prior validation config = request.config - input_query = request.query.input callback_response = None config_blob: ConfigBlob | None = None + input_guardrails: list[dict] = [] + output_guardrails: list[dict] = [] logger.info( f"[execute_job] Starting LLM job execution | job_id={job_id}, task_id={task_id}, " @@ -168,7 +178,10 @@ def execute_job( # if stored config, fetch blob from DB if config.is_stored_config: config_crud = ConfigVersionCrud( - session=session, project_id=project_id, config_id=config.id, organization_id=organization_id + session=session, + project_id=project_id, + config_id=config.id, + organization_id=organization_id, ) # blob is dynamic, need to resolve to ConfigBlob format @@ -186,43 +199,45 @@ def execute_job( else: config_blob = config.blob - if config_blob is not None and config_blob.guardrails is not None: - input_guardrails = config_blob.guardrails.input or [] - output_guardrails = config_blob.guardrails.output or [] - else: - input_guardrails = [] - output_guardrails = [] + if config_blob is not None and config_blob.guardrails is not None: + input_guardrails = config_blob.guardrails.input or [] + output_guardrails = config_blob.guardrails.output or [] - if input_guardrails: - safe_input = run_guardrails_validation(input_query, input_guardrails, job_id) + if input_guardrails: + safe_input = run_guardrails_validation( + request.query.input, input_guardrails, job_id + ) - logger.info( - f"[execute_job] Input guardrail validation | success={safe_input['success']}." - ) + logger.info( + f"[execute_job] Input guardrail validation | success={safe_input['success']}." 
+ ) + + if safe_input.get("bypassed"): + logger.info( + "[execute_job] Guardrails bypassed (service unavailable)" + ) - if safe_input.get("bypassed"): - logger.info("[execute_job] Guardrails bypassed (service unavailable)") + elif safe_input["success"]: + request.query.input = safe_input["data"]["safe_text"] - elif safe_input["success"]: - request.query.input = safe_input["data"]["safe_text"] + if safe_input["data"]["rephrase_needed"]: + callback_response = APIResponse.failure_response( + error=request.query.input, + metadata=request.request_metadata, + ) + return handle_job_error( + job_id, request.callback_url, callback_response + ) + else: + request.query.input = safe_input["error"] - if safe_input["data"]["rephrase_needed"]: callback_response = APIResponse.failure_response( - error=request.query.input, + error=safe_input["error"], metadata=request.request_metadata, ) return handle_job_error( job_id, request.callback_url, callback_response ) - else: - request.query.input = safe_input["error"] - - callback_response = APIResponse.failure_response( - error=safe_input["error"], - metadata=request.request_metadata, - ) - return handle_job_error(job_id, request.callback_url, callback_response) - try: # Transform Kaapi config to native config if needed (before getting provider) @@ -244,7 +259,7 @@ def execute_job( try: provider_instance = get_llm_provider( session=session, - provider_type=completion_config.provider, # Now always native provider type + provider_type=completion_config.provider, project_id=project_id, organization_id=organization_id, ) @@ -282,7 +297,9 @@ def execute_job( if response: if output_guardrails: output_text = response.response.output.text - safe_output = run_guardrails_validation(output_text, output_guardrails, job_id) + safe_output = run_guardrails_validation( + output_text, output_guardrails, job_id + ) logger.info( f"[execute_job] Output guardrail validation | success={safe_output['success']}." 
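For reviewers, the staged flow that this jobs.py hunk converges on is easier to read outside the diff. The sketch below is a minimal approximation under stated assumptions: run_validation and call_provider stand in for run_guardrails_validation and the provider's execute call, and all job/session/callback bookkeeping is omitted. It is illustrative only, not part of the patch.

from typing import Any, Callable


def guarded_call(
    query: str,
    input_guardrails: list[dict[str, Any]],
    output_guardrails: list[dict[str, Any]],
    run_validation: Callable[[str, list[dict[str, Any]]], dict[str, Any]],
    call_provider: Callable[[str], str],
) -> dict[str, Any]:
    # Input stage: sanitize or reject the query before the provider sees it.
    if input_guardrails:
        verdict = run_validation(query, input_guardrails)
        if verdict.get("bypassed"):
            pass  # guardrails service unavailable: keep the original input
        elif verdict["success"]:
            query = verdict["data"]["safe_text"]
            if verdict["data"]["rephrase_needed"]:
                return {"success": False, "error": query}
        else:
            return {"success": False, "error": verdict["error"]}

    # Only a validated (or bypassed) input reaches the provider.
    output = call_provider(query)

    # Output stage: sanitize the model response before returning it.
    if output_guardrails:
        verdict = run_validation(output, output_guardrails)
        if verdict["success"] and not verdict.get("bypassed"):
            output = verdict["data"]["safe_text"]

    return {"success": True, "output": output}
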
diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index 6953f738..e2b6ec88 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -1,4 +1,5 @@ from uuid import uuid4 +from unittest.mock import patch from fastapi.testclient import TestClient from sqlmodel import Session @@ -48,6 +49,40 @@ def test_create_config_success( assert data["data"]["version"]["config_blob"] == config_data["config_blob"] +def test_create_config_with_guardrails_excludes_guardrails_from_blob( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + config_data = { + "name": "test-llm-config-guardrails", + "description": "Config with guardrails", + "config_blob": { + "completion": { + "provider": "openai-native", + "params": {"model": "gpt-4"}, + }, + "guardrails": { + "input": [{"type": "pii_remover"}], + "output": [{"type": "gender_assumption_bias"}], + }, + }, + "commit_message": "Initial configuration", + } + + with patch("app.crud.config.config.create_guardrails_validators_if_present"): + response = client.post( + f"{settings.API_V1_STR}/configs/", + headers={"X-API-KEY": user_api_key.key}, + json=config_data, + ) + + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert "guardrails" not in data["data"]["version"]["config_blob"] + + def test_create_config_empty_blob_fails( db: Session, client: TestClient, diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 59223351..34acbceb 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -1,4 +1,5 @@ from uuid import uuid4 +from unittest.mock import patch from fastapi.testclient import TestClient from sqlmodel import Session @@ -57,6 +58,44 @@ def test_create_version_success( assert data["data"]["config_id"] == str(config.id) +def test_create_version_with_guardrails_excludes_guardrails_from_blob( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="test-config-guardrails", + ) + + version_data = { + "config_blob": { + "completion": { + "provider": "openai-native", + "params": {"model": "gpt-4-turbo"}, + }, + "guardrails": { + "input": [{"type": "pii_remover"}], + "output": [{"type": "gender_assumption_bias"}], + }, + }, + "commit_message": "Guardrails config", + } + + with patch("app.crud.config.version.create_guardrails_validators_if_present"): + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert "guardrails" not in data["data"]["config_blob"] + + def test_create_version_empty_blob_fails( db: Session, client: TestClient, diff --git a/backend/app/tests/api/routes/test_llm.py b/backend/app/tests/api/routes/test_llm.py index 279911d8..6fac7a86 100644 --- a/backend/app/tests/api/routes/test_llm.py +++ b/backend/app/tests/api/routes/test_llm.py @@ -171,21 +171,9 @@ def test_llm_call_success_with_guardrails( ) -> None: """Test successful LLM call when guardrails are enabled (no validators).""" - with ( - patch("app.services.llm.jobs.start_high_priority_job") as mock_start_job, - 
patch("app.services.llm.guardrails.call_guardrails") as mock_guardrails, - ): + with patch("app.services.llm.jobs.start_high_priority_job") as mock_start_job: mock_start_job.return_value = "test-task-id" - mock_guardrails.return_value = { - "success": True, - "bypassed": False, - "data": { - "safe_text": "What is the capital of France?", - "rephrase_needed": False, - }, - } - payload = LLMCallRequest( query=QueryParams(input="What is the capital of France?"), config=LLMCallConfig( @@ -199,8 +187,6 @@ def test_llm_call_success_with_guardrails( ) ) ), - input_guardrails=[], - output_guardrails=[], callback_url="https://example.com/callback", ) @@ -217,7 +203,6 @@ def test_llm_call_success_with_guardrails( assert "response is being generated" in body["data"]["message"] mock_start_job.assert_called_once() - mock_guardrails.assert_not_called() def test_llm_call_guardrails_bypassed_still_succeeds( @@ -226,21 +211,9 @@ def test_llm_call_guardrails_bypassed_still_succeeds( ) -> None: """If guardrails service is unavailable (bypassed), request should still succeed.""" - with ( - patch("app.services.llm.jobs.start_high_priority_job") as mock_start_job, - patch("app.services.llm.guardrails.call_guardrails") as mock_guardrails, - ): + with patch("app.services.llm.jobs.start_high_priority_job") as mock_start_job: mock_start_job.return_value = "test-task-id" - mock_guardrails.return_value = { - "success": True, - "bypassed": True, - "data": { - "safe_text": "What is the capital of France?", - "rephrase_needed": False, - }, - } - payload = LLMCallRequest( query=QueryParams(input="What is the capital of France?"), config=LLMCallConfig( @@ -254,8 +227,6 @@ def test_llm_call_guardrails_bypassed_still_succeeds( ) ) ), - input_guardrails=[{"type": "pii_remover"}], - output_guardrails=[], callback_url="https://example.com/callback", ) diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py index 0267c058..29dec655 100644 --- a/backend/app/tests/crud/config/test_config.py +++ b/backend/app/tests/crud/config/test_config.py @@ -1,4 +1,5 @@ from uuid import uuid4 +from unittest.mock import patch import pytest from sqlmodel import Session @@ -55,10 +56,46 @@ def test_create_config(db: Session, example_config_blob: ConfigBlob) -> None: assert version.id is not None assert version.config_id == config.id assert version.version == 1 - assert version.config_blob == example_config_blob.model_dump() + assert version.config_blob == example_config_blob.model_dump(exclude={"guardrails"}) assert version.commit_message == "Initial version" +def test_create_config_with_guardrails_excludes_guardrails_from_blob( + db: Session, +) -> None: + project = create_test_project(db) + config_crud = ConfigCrud( + session=db, + project_id=project.id, + organization_id=project.organization_id, + ) + + config_create = ConfigCreate( + name=f"test-config-{random_lower_string()}", + description="Test configuration", + config_blob=ConfigBlob( + completion=NativeCompletionConfig( + provider="openai-native", + params={"model": "gpt-4"}, + ), + guardrails={ + "input": [{"type": "pii_remover"}], + "output": [{"type": "gender_assumption_bias"}], + }, + ), + commit_message="Initial version", + ) + + with patch( + "app.crud.config.config.create_guardrails_validators_if_present" + ) as mock_create_guardrails: + _, version = config_crud.create_or_raise(config_create) + + mock_create_guardrails.assert_called_once() + assert "guardrails" not in version.config_blob + assert version.guardrails_config_id is not 
None + + def test_create_config_duplicate_name( db: Session, example_config_blob: ConfigBlob ) -> None: diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py index 8c6fa8ea..eda0b6ea 100644 --- a/backend/app/tests/crud/config/test_version.py +++ b/backend/app/tests/crud/config/test_version.py @@ -1,4 +1,5 @@ from uuid import uuid4 +from unittest.mock import patch import pytest from sqlmodel import Session @@ -35,7 +36,7 @@ def test_create_version(db: Session, example_config_blob: ConfigBlob) -> None: session=db, project_id=config.project_id, config_id=config.id ) - config_blob = example_config_blob.model_dump() + config_blob = example_config_blob.model_dump(exclude={"guardrails"}) version_create = ConfigVersionCreate( config_blob=config_blob, commit_message="Updated model and parameters", @@ -51,6 +52,41 @@ def test_create_version(db: Session, example_config_blob: ConfigBlob) -> None: assert version.deleted_at is None +def test_create_version_with_guardrails_excludes_guardrails_from_blob( + db: Session, +) -> None: + config = create_test_config(db) + version_crud = ConfigVersionCrud( + session=db, + project_id=config.project_id, + config_id=config.id, + organization_id=1, + ) + + version_create = ConfigVersionCreate( + config_blob=ConfigBlob( + completion=NativeCompletionConfig( + provider="openai-native", + params={"model": "gpt-4"}, + ), + guardrails={ + "input": [{"type": "pii_remover"}], + "output": [{"type": "gender_assumption_bias"}], + }, + ), + commit_message="Guardrails version", + ) + + with patch( + "app.crud.config.version.create_guardrails_validators_if_present" + ) as mock_create_guardrails: + version = version_crud.create_or_raise(version_create) + + mock_create_guardrails.assert_called_once() + assert "guardrails" not in version.config_blob + assert version.guardrails_config_id is not None + + def test_create_version_auto_increment( db: Session, example_config_blob: ConfigBlob ) -> None: @@ -118,7 +154,9 @@ def test_read_one_version(db: Session, example_config_blob: ConfigBlob) -> None: assert fetched_version.id == version.id assert fetched_version.version == version.version assert fetched_version.config_id == config.id - assert fetched_version.config_blob == example_config_blob.model_dump() + assert fetched_version.config_blob == example_config_blob.model_dump( + exclude={"guardrails"} + ) def test_read_one_version_not_found(db: Session) -> None: diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 4443aeca..ec71cbc3 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -1,11 +1,15 @@ import uuid from unittest.mock import MagicMock, patch -import pytest import httpx -from app.services.llm.guardrails import call_guardrails from app.core.config import settings +from app.services.llm.guardrails import ( + create_guardrails_validators_if_present, + create_validators_batch, + get_validators_config, + run_guardrails_validation, +) TEST_JOB_ID = uuid.uuid4() @@ -14,7 +18,7 @@ @patch("app.services.llm.guardrails.httpx.Client") -def test_call_guardrails_success(mock_client_cls) -> None: +def test_run_guardrails_validation_success(mock_client_cls) -> None: mock_response = MagicMock() mock_response.json.return_value = {"success": True} mock_response.raise_for_status.return_value = None @@ -23,23 +27,21 @@ def test_call_guardrails_success(mock_client_cls) -> None: mock_client.post.return_value = 
mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = call_guardrails(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + result = run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) assert result == {"success": True} mock_client.post.assert_called_once() - args, kwargs = mock_client.post.call_args - + _, kwargs = mock_client.post.call_args assert kwargs["json"]["input"] == TEST_TEXT assert kwargs["json"]["validators"] == TEST_CONFIG assert kwargs["json"]["request_id"] == str(TEST_JOB_ID) - assert kwargs["headers"]["Authorization"].startswith("Bearer ") assert kwargs["headers"]["Content-Type"] == "application/json" @patch("app.services.llm.guardrails.httpx.Client") -def test_call_guardrails_http_error_bypasses(mock_client_cls) -> None: +def test_run_guardrails_validation_http_error_bypasses(mock_client_cls) -> None: mock_response = MagicMock() mock_response.raise_for_status.side_effect = httpx.HTTPStatusError( "bad", request=None, response=None @@ -49,7 +51,7 @@ def test_call_guardrails_http_error_bypasses(mock_client_cls) -> None: mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = call_guardrails(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + result = run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) assert result["success"] is False assert result["bypassed"] is True @@ -57,42 +59,92 @@ def test_call_guardrails_http_error_bypasses(mock_client_cls) -> None: @patch("app.services.llm.guardrails.httpx.Client") -def test_call_guardrails_network_failure_bypasses(mock_client_cls) -> None: +def test_run_guardrails_validation_uses_settings(mock_client_cls) -> None: + mock_response = MagicMock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"ok": True} + mock_client = MagicMock() - mock_client.post.side_effect = httpx.ConnectError("failed") + mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = call_guardrails(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) - assert result["bypassed"] is True - assert result["data"]["safe_text"] == TEST_TEXT + _, kwargs = mock_client.post.call_args + assert ( + kwargs["headers"]["Authorization"] == f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}" + ) @patch("app.services.llm.guardrails.httpx.Client") -def test_call_guardrails_timeout_bypasses(mock_client_cls) -> None: +def test_create_validators_batch_success(mock_client_cls) -> None: + validators = [{"stage": "input", "type": "pii_remover"}] + config_id = uuid.uuid4() + + mock_response = MagicMock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"success": True, "data": [{"id": "v1"}]} + mock_client = MagicMock() - mock_client.post.side_effect = httpx.TimeoutException("timeout") + mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = call_guardrails(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + result = create_validators_batch( + validators=validators, + config_id=config_id, + organization_id=1, + project_id=2, + ) - assert result["bypassed"] is True + assert result == [{"id": "v1"}] + _, kwargs = mock_client.post.call_args + assert kwargs["json"]["config_id"] == str(config_id) + assert kwargs["json"]["validators"] == validators + assert kwargs["params"] == {"organization_id": 1, "project_id": 2} 
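
For reviewers, the wire shapes that the surrounding tests pin down are roughly as follows. The literals are illustrative examples consistent with the assertions above, with assumed values; they are not captured service traffic.

# create_validators_batch POSTs one batch per freshly minted guardrails_config_id:
batch_request = {
    "config_id": "2f0d8c1e-0000-4000-8000-000000000000",  # assumed stand-in for str(uuid4())
    "validators": [
        {"stage": "input", "type": "pii_remover"},
        {"stage": "output", "type": "gender_assumption_bias"},
    ],
}

# get_validators_config reads the validators back and splits payload["data"]
# by `stage` into an (input_guardrails, output_guardrails) tuple:
service_response = {
    "success": True,
    "data": [
        {"type": "pii_remover", "stage": "input"},
        {"type": "gender_assumption_bias", "stage": "output"},
    ],
}
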
@patch("app.services.llm.guardrails.httpx.Client")
-def test_call_guardrails_uses_settings(mock_client_cls) -> None:
+def test_get_validators_config_splits_input_output(mock_client_cls) -> None:
+    config_id = uuid.uuid4()
+
     mock_response = MagicMock()
     mock_response.raise_for_status.return_value = None
-    mock_response.json.return_value = {"ok": True}
+    mock_response.json.return_value = {
+        "success": True,
+        "data": [
+            {"type": "gender_assumption_bias", "stage": "output"},
+            {
+                "type": "uli_slur_match",
+                "stage": "input",
+                "config": {"severity": "high"},
+            },
+            {"type": "pii_remover", "stage": "input"},
+        ],
+    }
 
     mock_client = MagicMock()
-    mock_client.post.return_value = mock_response
+    mock_client.get.return_value = mock_response
     mock_client_cls.return_value.__enter__.return_value = mock_client
 
-    call_guardrails(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID)
+    input_guardrails, output_guardrails = get_validators_config(
+        config_id=config_id,
+        organization_id=1,
+        project_id=1,
+    )
 
-    _, kwargs = mock_client.post.call_args
+    assert len(input_guardrails) == 2
+    assert len(output_guardrails) == 1
+    assert all(g["stage"] == "input" for g in input_guardrails)
+    assert all(g["stage"] == "output" for g in output_guardrails)
 
-    assert (
-        kwargs["headers"]["Authorization"] == f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}"
+
+@patch("app.services.llm.guardrails.create_validators_batch")
+def test_create_guardrails_validators_if_present_noop_on_none(mock_batch) -> None:
+    create_guardrails_validators_if_present(
+        guardrails=None,
+        guardrails_config_id=uuid.uuid4(),
+        organization_id=1,
+        project_id=1,
     )
+
+    mock_batch.assert_not_called()
diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py
index 3daadad2..1f9a5dd9 100644
--- a/backend/app/tests/services/llm/test_jobs.py
+++ b/backend/app/tests/services/llm/test_jobs.py
@@ -255,10 +255,6 @@ def job_env(self, db, mock_llm_response):
             patch("app.services.llm.jobs.Session") as mock_session_class,
             patch("app.services.llm.jobs.get_llm_provider") as mock_get_provider,
             patch("app.services.llm.jobs.send_callback") as mock_send_callback,
-            patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([], []),
-            ),
         ):
             mock_session_class.return_value.__enter__.return_value = db
             mock_session_class.return_value.__exit__.return_value = None
@@ -739,7 +735,9 @@ def test_guardrails_sanitize_input_before_provider(
 
         unsafe_input = "My credit card is 4111 1111 1111 1111"
 
-        with patch("app.services.llm.jobs.call_guardrails") as mock_guardrails:
+        with patch(
+            "app.services.llm.jobs.run_guardrails_validation"
+        ) as mock_guardrails:
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -756,20 +754,17 @@ def test_guardrails_sanitize_input_before_provider(
                         "completion": {
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
-                        }
+                        },
+                        "guardrails": {
+                            "input": [{"type": "pii_remover"}],
+                            "output": [],
+                        },
                     }
                 },
-                "input_guardrails": [{"type": "pii_remover"}],
-                "output_guardrails": [],
                 "include_provider_raw_response": False,
                 "callback_url": None,
             }
-
-            with patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([{"type": "pii_remover"}], []),
-            ):
-                result = self._execute_job(job_for_execution, db, request_data)
+            result = self._execute_job(job_for_execution, db, request_data)
 
             provider_query = env["provider"].execute.call_args[0][1]
             assert "[REDACTED]" in provider_query.input
@@ -785,7 +780,9 @@ def test_guardrails_sanitize_output_after_provider(
         env["mock_llm_response"].response.output.text = "Aadhar no 123-45-6789"
         env["provider"].execute.return_value = (env["mock_llm_response"], None)
 
-        with patch("app.services.llm.jobs.call_guardrails") as mock_guardrails:
+        with patch(
+            "app.services.llm.jobs.run_guardrails_validation"
+        ) as mock_guardrails:
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -802,18 +799,15 @@ def test_guardrails_sanitize_output_after_provider(
                         "completion": {
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
-                        }
+                        },
+                        "guardrails": {
+                            "input": [],
+                            "output": [{"type": "pii_remover"}],
+                        },
                     }
                 },
-                "input_guardrails": [],
-                "output_guardrails": [{"type": "pii_remover"}],
             }
-
-            with patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([], [{"type": "pii_remover"}]),
-            ):
-                result = self._execute_job(job_for_execution, db, request_data)
+            result = self._execute_job(job_for_execution, db, request_data)
 
             assert "REDACTED" in result["data"]["response"]["output"]["text"]
 
@@ -826,7 +820,9 @@ def test_guardrails_bypass_does_not_modify_input(
 
         unsafe_input = "4111 1111 1111 1111"
 
-        with patch("app.services.llm.jobs.call_guardrails") as mock_guardrails:
+        with patch(
+            "app.services.llm.jobs.run_guardrails_validation"
+        ) as mock_guardrails:
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": True,
@@ -843,17 +839,15 @@ def test_guardrails_bypass_does_not_modify_input(
                         "completion": {
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
-                        }
+                        },
+                        "guardrails": {
+                            "input": [{"type": "pii_remover"}],
+                            "output": [],
+                        },
                     }
                 },
-                "input_guardrails": [{"type": "pii_remover"}],
             }
-
-            with patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([{"type": "pii_remover"}], []),
-            ):
-                self._execute_job(job_for_execution, db, request_data)
+            self._execute_job(job_for_execution, db, request_data)
 
             provider_query = env["provider"].execute.call_args[0][1]
             assert provider_query.input == unsafe_input
@@ -863,7 +857,9 @@ def test_guardrails_validation_failure_blocks_job(
     ):
         env = job_env
 
-        with patch("app.services.llm.jobs.call_guardrails") as mock_guardrails:
+        with patch(
+            "app.services.llm.jobs.run_guardrails_validation"
+        ) as mock_guardrails:
             mock_guardrails.return_value = {
                 "success": False,
                 "error": "Unsafe content detected",
@@ -876,17 +872,15 @@ def test_guardrails_validation_failure_blocks_job(
                         "completion": {
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
-                        }
+                        },
+                        "guardrails": {
+                            "input": [{"type": "uli_slur_match"}],
+                            "output": [],
+                        },
                     }
                 },
-                "input_guardrails": [{"type": "uli_slur_match"}],
            }
-
-            with patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([{"type": "uli_slur_match"}], []),
-            ):
-                result = self._execute_job(job_for_execution, db, request_data)
+            result = self._execute_job(job_for_execution, db, request_data)
 
             assert not result["success"]
             assert "Unsafe content" in result["error"]
@@ -897,7 +891,9 @@ def test_guardrails_rephrase_needed_blocks_job(
     ):
         env = job_env
 
-        with patch("app.services.llm.jobs.call_guardrails") as mock_guardrails:
+        with patch(
+            "app.services.llm.jobs.run_guardrails_validation"
+        ) as mock_guardrails:
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -914,17 +910,15 @@ def test_guardrails_rephrase_needed_blocks_job(
                         "completion": {
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
-                        }
+                        },
+                        "guardrails": {
+                            "input": [{"type": "policy"}],
+                            "output": [],
+                        },
                     }
                 },
-                "input_guardrails": [{"type": "policy"}],
            }
-
-            with patch(
-                "app.services.llm.jobs.fetch_guardrails_config",
-                return_value=([{"type": "policy"}], []),
-            ):
-                result = self._execute_job(job_for_execution, db, request_data)
+            result = self._execute_job(job_for_execution, db, request_data)
 
             assert not result["success"]
             env["provider"].execute.assert_not_called()
@@ -959,6 +953,45 @@ def test_resolve_config_blob_success(self, db: Session):
         assert resolved_blob.completion.params["model"] == "gpt-4"
         assert resolved_blob.completion.params["temperature"] == 0.8
 
+    def test_resolve_config_blob_fetches_guardrails_by_guardrails_config_id(
+        self, db: Session
+    ):
+        project = get_project(db)
+        config = create_test_config(db, project_id=project.id)
+        db.commit()
+
+        statement = select(ConfigVersion).where(
+            (ConfigVersion.config_id == config.id) & (ConfigVersion.version == 1)
+        )
+        config_version = db.exec(statement).one()
+
+        config_crud = ConfigVersionCrud(
+            session=db, project_id=project.id, config_id=config.id, organization_id=1
+        )
+        llm_call_config = LLMCallConfig(id=str(config.id), version=1)
+
+        with patch("app.services.llm.jobs.get_validators_config") as mock_fetch:
+            mock_fetch.return_value = (
+                [{"type": "pii_remover", "stage": "input"}],
+                [{"type": "gender_assumption_bias", "stage": "output"}],
+            )
+            resolved_blob, error = resolve_config_blob(config_crud, llm_call_config)
+
+        assert error is None
+        assert resolved_blob is not None
+        assert resolved_blob.guardrails is not None
+        assert resolved_blob.guardrails.input == [
+            {"type": "pii_remover", "stage": "input"}
+        ]
+        assert resolved_blob.guardrails.output == [
+            {"type": "gender_assumption_bias", "stage": "output"}
+        ]
+        mock_fetch.assert_called_once_with(
+            config_id=config_version.guardrails_config_id,
+            organization_id=1,
+            project_id=project.id,
+        )
+
     def test_resolve_config_blob_version_not_found(self, db: Session):
         """Test resolve_config_blob when version doesn't exist."""
         project = get_project(db)

From c9ea457fc8dafa98a1e00c5499cbbf747f8c062c Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Fri, 13 Feb 2026 00:45:08 +0530
Subject: [PATCH 07/35] resolved comments

---
 backend/app/models/config/version.py   |  3 ++-
 backend/app/services/llm/guardrails.py | 20 +++++++++++++++++---
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py
index c11a1627..895d5b2f 100644
--- a/backend/app/models/config/version.py
+++ b/backend/app/models/config/version.py
@@ -63,7 +63,8 @@ class ConfigVersion(ConfigVersionBase, table=True):
         sa_column_kwargs={"comment": "Reference to the parent configuration"},
     )
 
-    guardrails_config_id: UUID = Field(
+    guardrails_config_id: UUID | None = Field(
+        default=None,
         nullable=True,
         sa_column_kwargs={
             "comment": "Reference to the kaapi_guardrails validator configuration"
diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py
index 9ed186d9..6fe053cf 100644
--- a/backend/app/services/llm/guardrails.py
+++ b/backend/app/services/llm/guardrails.py
@@ -95,11 +95,15 @@ def create_validators_batch(
             "validators": validators,
         }
 
-        logging.info(f"[create_validators_batch] payload: {payload}")
+        logger.info(
+            "[create_validators_batch] Requesting validator batch creation. "
+            f"config_id={config_id}, organization_id={organization_id}, "
+            f"project_id={project_id}, validators_count={len(validators)}"
+        )
 
         with httpx.Client(timeout=10.0) as client:
             response = client.post(
-                f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/batch",
+                f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/batch",
                 params={
                     "organization_id": organization_id,
                     "project_id": project_id,
@@ -111,8 +115,18 @@ def create_validators_batch(
             response.raise_for_status()
 
             data = response.json()
+            if not isinstance(data, dict):
+                raise ValueError(
+                    "Invalid response format from guardrails service: expected object."
+                )
+
+            validators_data = data.get("data")
+            if not isinstance(validators_data, list):
+                raise ValueError(
+                    "Invalid response format from guardrails service: `data` must be a list."
+                )
 
-            return data["data"]
+            return validators_data
 
     except Exception as e:
         logger.error(

From 155fea7f78a1dd0fa9399f400c93005d31454690 Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Fri, 13 Feb 2026 00:46:46 +0530
Subject: [PATCH 08/35] resolved comment

---
 backend/app/tests/crud/config/test_version.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py
index eda0b6ea..df238aba 100644
--- a/backend/app/tests/crud/config/test_version.py
+++ b/backend/app/tests/crud/config/test_version.py
@@ -5,7 +5,7 @@ from sqlmodel import Session
 from fastapi import HTTPException
 
-from app.models import ConfigVersionCreate, ConfigBlob
+from app.models import ConfigVersionCreate, ConfigBlob, Project
 from app.models.llm.request import NativeCompletionConfig
 from app.crud.config import ConfigVersionCrud
 from app.tests.utils.test_data import (
@@ -56,11 +56,13 @@ def test_create_version_with_guardrails_excludes_guardrails_from_blob(
     db: Session,
 ) -> None:
     config = create_test_config(db)
+    project = db.get(Project, config.project_id)
+    assert project is not None
     version_crud = ConfigVersionCrud(
         session=db,
         project_id=config.project_id,
         config_id=config.id,
-        organization_id=1,
+        organization_id=project.organization_id,
     )
 
     version_create = ConfigVersionCreate(

From 60e0bec680600c166a3087cde83bbf752e033a6e Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Fri, 13 Feb 2026 01:05:53 +0530
Subject: [PATCH 09/35] updated url

---
 backend/app/services/llm/guardrails.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py
index 6fe053cf..8d8ca74c 100644
--- a/backend/app/services/llm/guardrails.py
+++ b/backend/app/services/llm/guardrails.py
@@ -151,7 +151,7 @@ def get_validators_config(
         "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}",
     }
 
-    endpoint = f"{settings.KAAPI_GUARDRAILS_URL}validators/configs/{config_id}"
+    endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/{config_id}"
 
     try:
         with httpx.Client(timeout=10.0) as client:
---
 ..._guardrails_config_id_to_config_version.py |  32 ----
 backend/app/crud/config/config.py             |  18 +-
 backend/app/crud/config/version.py            |  16 +-
 backend/app/models/config/version.py          |  11 +-
 backend/app/models/llm/request.py             |  24 +--
 backend/app/services/llm/guardrails.py        |  70 ++++----
 backend/app/services/llm/jobs.py              |  43 ++---
 .../tests/api/routes/configs/test_config.py   |  35 ++--
 .../tests/api/routes/configs/test_version.py  |  34 ++--
 backend/app/tests/crud/config/test_config.py  |  22 +--
 backend/app/tests/crud/config/test_version.py |  26 +--
 .../app/tests/services/llm/test_guardrails.py |  45 +++--
 backend/app/tests/services/llm/test_jobs.py   | 155 +++++++++++-------
 13 files changed, 245 insertions(+), 286 deletions(-)
 delete mode 100644 backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py

diff --git a/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py b/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py
deleted file mode 100644
index 67704925..00000000
--- a/backend/app/alembic/versions/046_add_guardrails_config_id_to_config_version.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""Add guardrails_config_id to config_version
-
-Revision ID: 046
-Revises: 045
-Create Date: 2026-02-12 12:30:00.000000
-
-"""
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = "046"
-down_revision = "045"
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    op.add_column(
-        "config_version",
-        sa.Column(
-            "guardrails_config_id",
-            sa.Uuid(),
-            nullable=True,
-            comment="Reference to the kaapi_guardrails validator configuration",
-        ),
-    )
-
-
-def downgrade():
-    op.drop_column("config_version", "guardrails_config_id")
diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py
index 6fd488d8..e3288e19 100644
--- a/backend/app/crud/config/config.py
+++ b/backend/app/crud/config/config.py
@@ -1,5 +1,5 @@
 import logging
-from uuid import UUID, uuid4
+from uuid import UUID
 from typing import Optional, Tuple
 
 from sqlmodel import Session, select, and_
@@ -12,7 +12,6 @@
     ConfigVersion,
 )
 from app.core.util import now
-from app.services.llm.guardrails import create_guardrails_validators_if_present
 
 logger = logging.getLogger(__name__)
 
@@ -38,7 +37,6 @@ def create_or_raise(
         self._check_unique_name_or_raise(config_create.name)
 
         try:
-            guardrails_config_id = uuid4()
             config = Config(
                 name=config_create.name,
                 description=config_create.description,
@@ -48,22 +46,12 @@ def create_or_raise(
             self.session.add(config)
             self.session.flush()  # Flush to get the config.id
 
-            config_blob = config_create.config_blob.model_dump(exclude={"guardrails"})
-            create_guardrails_validators_if_present(
-                guardrails=config_create.config_blob.guardrails,
-                guardrails_config_id=guardrails_config_id,
-                organization_id=self.organization_id,
-                project_id=self.project_id,
-            )
-
-            # Create the initial version. Guardrails are stored externally via
-            # guardrails_config_id and should not be persisted in config_blob.
+            # Create the initial version
             version = ConfigVersion(
                 config_id=config.id,
                 version=1,
-                config_blob=config_blob,
+                config_blob=config_create.config_blob.model_dump(),
                 commit_message=config_create.commit_message,
-                guardrails_config_id=guardrails_config_id,
             )
 
             self.session.add(version)
diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py
index 5f134fa3..5772af7c 100644
--- a/backend/app/crud/config/version.py
+++ b/backend/app/crud/config/version.py
@@ -1,6 +1,6 @@
 import logging
 from typing import Optional
-from uuid import UUID, uuid4
+from uuid import UUID
 
 from sqlmodel import Session, select, and_, func
 from fastapi import HTTPException
@@ -9,7 +9,6 @@
 from .config import ConfigCrud
 from app.core.util import now
 from app.models import Config, ConfigVersion, ConfigVersionCreate, ConfigVersionItems
-from app.services.llm.guardrails import create_guardrails_validators_if_present
 
 logger = logging.getLogger(__name__)
 
@@ -39,23 +38,12 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion:
         self._config_exists_or_raise(self.config_id)
         try:
             next_version = self._get_next_version(self.config_id)
-            guardrails_config_id = uuid4()
 
             version = ConfigVersion(
                 config_id=self.config_id,
                 version=next_version,
-                config_blob=version_create.config_blob.model_dump(
-                    exclude={"guardrails"}
-                ),
+                config_blob=version_create.config_blob.model_dump(),
                 commit_message=version_create.commit_message,
-                guardrails_config_id=guardrails_config_id,
-            )
-
-            create_guardrails_validators_if_present(
-                guardrails=version_create.config_blob.guardrails,
-                guardrails_config_id=guardrails_config_id,
-                organization_id=self.organization_id,
-                project_id=self.project_id,
             )
 
             self.session.add(version)
diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py
index 895d5b2f..cd6c4fa2 100644
--- a/backend/app/models/config/version.py
+++ b/backend/app/models/config/version.py
@@ -62,15 +62,6 @@ class ConfigVersion(ConfigVersionBase, table=True):
         ondelete="CASCADE",
         sa_column_kwargs={"comment": "Reference to the parent configuration"},
     )
-
-    guardrails_config_id: UUID | None = Field(
-        default=None,
-        nullable=True,
-        sa_column_kwargs={
-            "comment": "Reference to the kaapi_guardrails validator configuration"
-        },
-    )
-
     version: int = Field(
         nullable=False,
         description="Version number starting at 1",
@@ -125,4 +116,4 @@ class ConfigVersionItems(SQLModel):
         description="Optional message describing the changes in this version",
     )
     inserted_at: datetime
-    updated_at: datetime
+    updated_at: datetime
\ No newline at end of file
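With `guardrails_config_id` dropped from `ConfigVersion`, the link to the guardrails service now lives inside the blob itself as lightweight validator references, which the `request.py` hunk below introduces. A sketch of what such a blob looks like once validated, using only field names defined in this patch (the id values are placeholders):

    from app.models.llm.request import ConfigBlob

    blob = ConfigBlob.model_validate(
        {
            "completion": {"provider": "openai-native", "params": {"model": "gpt-4"}},
            "input_guardrails": [{"validator_config_id": 1}],
            "output_guardrails": [{"validator_config_id": 2}],
        }
    )
    # blob.input_guardrails[0].validator_config_id == 1

diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py
index dcdb6188..7386b461 100644
--- a/backend/app/models/llm/request.py
+++ b/backend/app/models/llm/request.py
@@ -119,19 +119,8 @@ class KaapiCompletionConfig(SQLModel):
     Field(discriminator="provider"),
 ]
 
-
-class GuardrailsConfig(SQLModel):
-    """Guardrails configuration applied during validation."""
-
-    input: list[dict[str, Any]] | None = Field(
-        default=None,
-        description="Guardrails applied to validate/sanitize the input before the LLM call",
-    )
-
-    output: list[dict[str, Any]] | None = Field(
-        default=None,
-        description="Guardrails applied to validate/sanitize the output after the LLM call",
-    )
+class Validator(SQLModel):
+    validator_config_id: int = Field(..., description="Validator config")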
configuration") - guardrails: GuardrailsConfig | None = Field( + input_guardrails: list[Validator] | None = Field( default=None, - description="Optional guardrails configuration for input/output validation", + description="Guardrails applied to validate/sanitize the input before the LLM call", + ) + + output_guardrails: list[Validator] | None = Field( + default=None, + description="Guardrails applied to validate/sanitize the output after the LLM call", ) # Future additions: # classifier: ClassifierConfig | None = None diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 8d8ca74c..78da9c5f 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -5,13 +5,13 @@ import httpx from app.core.config import settings -from app.models.llm.request import GuardrailsConfig +from app.models.llm.request import Validator logger = logging.getLogger(__name__) def run_guardrails_validation( - input_text: str, guardrail_config: list[dict], job_id: UUID + input_text: str, guardrail_config: list[dict[str, Any] | Validator], job_id: UUID ) -> dict[str, Any]: """ Call the Kaapi guardrails service to validate and process input text. @@ -24,10 +24,15 @@ def run_guardrails_validation( Returns: JSON response from the guardrails service with validation results. """ + validators_payload = [ + validator.model_dump() if isinstance(validator, Validator) else validator + for validator in guardrail_config + ] + payload = { "request_id": str(job_id), "input": input_text, - "validators": guardrail_config, + "validators": validators_payload, } headers = { @@ -136,38 +141,50 @@ def create_validators_batch( def get_validators_config( - config_id: UUID | str, + validator_configs: list[Validator] | None, organization_id: int | None, project_id: int | None, ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: """ - Fetch validator configuration for a specific config id and split by stage. + Fetch validator configurations from batch payload and split by stage. Calls: - GET /validators/configs/{config_id}?organization_id={organization_id}&project_id={project_id} + POST /validators/configs/batch/fetch?organization_id={organization_id}&project_id={project_id} """ + if not validator_configs: + return [], [] + headers = { "accept": "application/json", "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}", + "Content-Type": "application/json", } - endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/{config_id}" + endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/batch/fetch" try: with httpx.Client(timeout=10.0) as client: - response = client.get( + response = client.post( endpoint, params={ "organization_id": organization_id, "project_id": project_id, }, + json=[validator.model_dump() for validator in validator_configs], headers=headers, ) response.raise_for_status() payload = response.json() - validators = payload.get("data", []) if isinstance(payload, dict) else [] + if not isinstance(payload, dict): + raise ValueError( + "Invalid validators response format: expected JSON object." + ) + if not payload.get("success", False): + raise ValueError("Validator config fetch failed: `success` is false.") + + validators = payload.get("data", []) if not isinstance(validators, list): raise ValueError( "Invalid validators response format: `data` must be a list." @@ -191,39 +208,8 @@ def get_validators_config( except Exception as e: logger.error( "[get_validators_config] Failed to fetch validator config. 
" - f"config_id={config_id}, organization_id={organization_id}, project_id={project_id}, error={e}" + f"validator_configs={validator_configs}, organization_id={organization_id}, project_id={project_id}, " + f"endpoint={endpoint}, error={e}" ) raise - -def build_staged_validators( - guardrails: GuardrailsConfig | None, -) -> list[dict[str, Any]]: - validators: list[dict[str, Any]] = [] - if guardrails is None: - return validators - - for validator in guardrails.input or []: - validators.append({"stage": "input", **validator}) - for validator in guardrails.output or []: - validators.append({"stage": "output", **validator}) - - return validators - - -def create_guardrails_validators_if_present( - guardrails: GuardrailsConfig | None, - guardrails_config_id: UUID, - organization_id: int | None, - project_id: int | None, -) -> None: - validators = build_staged_validators(guardrails) - if not validators: - return - - create_validators_batch( - validators=validators, - config_id=guardrails_config_id, - organization_id=organization_id, - project_id=project_id, - ) diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index bcc29602..22e388bb 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -12,7 +12,12 @@ from app.crud.credentials import get_provider_credential from app.crud.jobs import JobCrud from app.models import JobStatus, JobType, JobUpdate, LLMCallRequest -from app.models.llm.request import ConfigBlob, LLMCallConfig, KaapiCompletionConfig +from app.models.llm.request import ( + ConfigBlob, + LLMCallConfig, + KaapiCompletionConfig, + Validator, +) from app.services.llm.guardrails import get_validators_config, run_guardrails_validation from app.services.llm.providers.registry import get_llm_provider from app.services.llm.mappers import transform_kaapi_config_to_native @@ -95,27 +100,6 @@ def resolve_config_blob( config_version = config_crud.exists_or_raise(version_number=config.version) config_blob_data = dict(config_version.config_blob) - if ( - config_version.guardrails_config_id - and config_crud.organization_id is not None - and config_crud.project_id is not None - ): - try: - input_guardrails, output_guardrails = get_validators_config( - config_id=config_version.guardrails_config_id, - organization_id=config_crud.organization_id, - project_id=config_crud.project_id, - ) - config_blob_data["guardrails"] = { - "input": input_guardrails, - "output": output_guardrails, - } - except Exception as e: - logger.warning( - f"[resolve_config_blob] Failed to fetch guardrails validators for config version. 
" - f"guardrails_config_id={config_version.guardrails_config_id}, error={e}" - ) - except HTTPException as e: return None, f"Failed to retrieve stored configuration: {e.detail}" except Exception: @@ -199,9 +183,18 @@ def execute_job( else: config_blob = config.blob - if config_blob is not None and config_blob.guardrails is not None: - input_guardrails = config_blob.guardrails.input or [] - output_guardrails = config_blob.guardrails.output or [] + if config_blob is not None: + validator_configs: list[Validator] = [ + *(config_blob.input_guardrails or []), + *(config_blob.output_guardrails or []), + ] + + if validator_configs: + input_guardrails, output_guardrails = get_validators_config( + validator_configs=validator_configs, + organization_id=organization_id, + project_id=project_id, + ) if input_guardrails: safe_input = run_guardrails_validation( diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index e2b6ec88..705f9edc 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -1,6 +1,4 @@ from uuid import uuid4 -from unittest.mock import patch - from fastapi.testclient import TestClient from sqlmodel import Session @@ -46,10 +44,15 @@ def test_create_config_success( assert "id" in data["data"] assert "version" in data["data"] assert data["data"]["version"]["version"] == 1 - assert data["data"]["version"]["config_blob"] == config_data["config_blob"] + assert ( + data["data"]["version"]["config_blob"]["completion"] + == config_data["config_blob"]["completion"] + ) + assert data["data"]["version"]["config_blob"]["input_guardrails"] is None + assert data["data"]["version"]["config_blob"]["output_guardrails"] is None -def test_create_config_with_guardrails_excludes_guardrails_from_blob( +def test_create_config_with_guardrails_persists_validator_refs( db: Session, client: TestClient, user_api_key: TestAuthContext, @@ -62,25 +65,27 @@ def test_create_config_with_guardrails_excludes_guardrails_from_blob( "provider": "openai-native", "params": {"model": "gpt-4"}, }, - "guardrails": { - "input": [{"type": "pii_remover"}], - "output": [{"type": "gender_assumption_bias"}], - }, + "input_guardrails": [{"validator_config_id": 1}], + "output_guardrails": [{"validator_config_id": 2}], }, "commit_message": "Initial configuration", } - with patch("app.crud.config.config.create_guardrails_validators_if_present"): - response = client.post( - f"{settings.API_V1_STR}/configs/", - headers={"X-API-KEY": user_api_key.key}, - json=config_data, - ) + response = client.post( + f"{settings.API_V1_STR}/configs/", + headers={"X-API-KEY": user_api_key.key}, + json=config_data, + ) assert response.status_code == 201 data = response.json() assert data["success"] is True - assert "guardrails" not in data["data"]["version"]["config_blob"] + assert data["data"]["version"]["config_blob"]["input_guardrails"] == [ + {"validator_config_id": 1} + ] + assert data["data"]["version"]["config_blob"]["output_guardrails"] == [ + {"validator_config_id": 2} + ] def test_create_config_empty_blob_fails( diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 34acbceb..2a1e593d 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -1,6 +1,4 @@ from uuid import uuid4 -from unittest.mock import patch - from fastapi.testclient import TestClient from sqlmodel import 
diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py
index e2b6ec88..705f9edc 100644
--- a/backend/app/tests/api/routes/configs/test_config.py
+++ b/backend/app/tests/api/routes/configs/test_config.py
@@ -1,6 +1,4 @@
 from uuid import uuid4
-from unittest.mock import patch
-
 from fastapi.testclient import TestClient
 from sqlmodel import Session
 
@@ -46,10 +44,15 @@ def test_create_config_success(
     assert "id" in data["data"]
     assert "version" in data["data"]
     assert data["data"]["version"]["version"] == 1
-    assert data["data"]["version"]["config_blob"] == config_data["config_blob"]
+    assert (
+        data["data"]["version"]["config_blob"]["completion"]
+        == config_data["config_blob"]["completion"]
+    )
+    assert data["data"]["version"]["config_blob"]["input_guardrails"] is None
+    assert data["data"]["version"]["config_blob"]["output_guardrails"] is None
 
 
-def test_create_config_with_guardrails_excludes_guardrails_from_blob(
+def test_create_config_with_guardrails_persists_validator_refs(
     db: Session,
     client: TestClient,
     user_api_key: TestAuthContext,
@@ -62,25 +65,27 @@ def test_create_config_with_guardrails_persists_validator_refs(
                 "provider": "openai-native",
                 "params": {"model": "gpt-4"},
             },
-            "guardrails": {
-                "input": [{"type": "pii_remover"}],
-                "output": [{"type": "gender_assumption_bias"}],
-            },
+            "input_guardrails": [{"validator_config_id": 1}],
+            "output_guardrails": [{"validator_config_id": 2}],
         },
         "commit_message": "Initial configuration",
     }
 
-    with patch("app.crud.config.config.create_guardrails_validators_if_present"):
-        response = client.post(
-            f"{settings.API_V1_STR}/configs/",
-            headers={"X-API-KEY": user_api_key.key},
-            json=config_data,
-        )
+    response = client.post(
+        f"{settings.API_V1_STR}/configs/",
+        headers={"X-API-KEY": user_api_key.key},
+        json=config_data,
+    )
 
     assert response.status_code == 201
     data = response.json()
     assert data["success"] is True
-    assert "guardrails" not in data["data"]["version"]["config_blob"]
+    assert data["data"]["version"]["config_blob"]["input_guardrails"] == [
+        {"validator_config_id": 1}
+    ]
+    assert data["data"]["version"]["config_blob"]["output_guardrails"] == [
+        {"validator_config_id": 2}
+    ]
 
 
 def test_create_config_empty_blob_fails(
diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py
index 34acbceb..2a1e593d 100644
--- a/backend/app/tests/api/routes/configs/test_version.py
+++ b/backend/app/tests/api/routes/configs/test_version.py
@@ -1,6 +1,4 @@
 from uuid import uuid4
-from unittest.mock import patch
-
 from fastapi.testclient import TestClient
 from sqlmodel import Session
 
@@ -53,12 +51,16 @@ def test_create_version_success(
     assert (
         data["data"]["version"] == 2
     )  # First version created with config, this is second
-    assert data["data"]["config_blob"] == version_data["config_blob"]
+    assert data["data"]["config_blob"]["completion"] == version_data["config_blob"][
+        "completion"
+    ]
+    assert data["data"]["config_blob"]["input_guardrails"] is None
+    assert data["data"]["config_blob"]["output_guardrails"] is None
     assert data["data"]["commit_message"] == version_data["commit_message"]
     assert data["data"]["config_id"] == str(config.id)
 
 
-def test_create_version_with_guardrails_excludes_guardrails_from_blob(
+def test_create_version_with_guardrails_persists_validator_refs(
     db: Session,
     client: TestClient,
     user_api_key: TestAuthContext,
@@ -75,25 +77,27 @@ def test_create_version_with_guardrails_persists_validator_refs(
                 "provider": "openai-native",
                 "params": {"model": "gpt-4-turbo"},
             },
-            "guardrails": {
-                "input": [{"type": "pii_remover"}],
-                "output": [{"type": "gender_assumption_bias"}],
-            },
+            "input_guardrails": [{"validator_config_id": 1}],
+            "output_guardrails": [{"validator_config_id": 2}],
         },
         "commit_message": "Guardrails config",
    }
 
-    with patch("app.crud.config.version.create_guardrails_validators_if_present"):
-        response = client.post(
-            f"{settings.API_V1_STR}/configs/{config.id}/versions",
-            headers={"X-API-KEY": user_api_key.key},
-            json=version_data,
-        )
+    response = client.post(
+        f"{settings.API_V1_STR}/configs/{config.id}/versions",
+        headers={"X-API-KEY": user_api_key.key},
+        json=version_data,
+    )
 
     assert response.status_code == 201
     data = response.json()
     assert data["success"] is True
-    assert "guardrails" not in data["data"]["config_blob"]
+    assert data["data"]["config_blob"]["input_guardrails"] == [
+        {"validator_config_id": 1}
+    ]
+    assert data["data"]["config_blob"]["output_guardrails"] == [
+        {"validator_config_id": 2}
+    ]
 
 
 def test_create_version_empty_blob_fails(
diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py
index 29dec655..a0f54843 100644
--- a/backend/app/tests/crud/config/test_config.py
+++ b/backend/app/tests/crud/config/test_config.py
@@ -1,6 +1,4 @@
 from uuid import uuid4
-from unittest.mock import patch
-
 import pytest
 from sqlmodel import Session
 from fastapi import HTTPException
@@ -56,11 +54,11 @@ def test_create_config(db: Session, example_config_blob: ConfigBlob) -> None:
     assert version.id is not None
     assert version.config_id == config.id
     assert version.version == 1
-    assert version.config_blob == example_config_blob.model_dump(exclude={"guardrails"})
+    assert version.config_blob == example_config_blob.model_dump()
     assert version.commit_message == "Initial version"
 
 
-def test_create_config_with_guardrails_excludes_guardrails_from_blob(
+def test_create_config_with_guardrails_persists_validator_refs(
     db: Session,
 ) -> None:
     project = create_test_project(db)
@@ -78,22 +76,16 @@ def test_create_config_with_guardrails_persists_validator_refs(
             provider="openai-native",
             params={"model": "gpt-4"},
         ),
-        guardrails={
-            "input": [{"type": "pii_remover"}],
-            "output": [{"type": "gender_assumption_bias"}],
-        },
+        input_guardrails=[{"validator_config_id": 1}],
+        output_guardrails=[{"validator_config_id": 2}],
     ),
     commit_message="Initial version",
     )
 
-    with patch(
-        "app.crud.config.config.create_guardrails_validators_if_present"
-    ) as mock_create_guardrails:
-        _, version = config_crud.create_or_raise(config_create)
+    _, version = config_crud.create_or_raise(config_create)
 
-    mock_create_guardrails.assert_called_once()
-    assert "guardrails" not in version.config_blob
-    assert version.guardrails_config_id is not None
+    assert version.config_blob["input_guardrails"] == [{"validator_config_id": 1}]
+    assert version.config_blob["output_guardrails"] == [{"validator_config_id": 2}]
 
 
 def test_create_config_duplicate_name(
diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py
index df238aba..9ecdc188 100644
--- a/backend/app/tests/crud/config/test_version.py
+++ b/backend/app/tests/crud/config/test_version.py
@@ -1,6 +1,4 @@
 from uuid import uuid4
-from unittest.mock import patch
-
 import pytest
 from sqlmodel import Session
 from fastapi import HTTPException
@@ -36,7 +34,7 @@ def test_create_version(db: Session, example_config_blob: ConfigBlob) -> None:
         session=db, project_id=config.project_id, config_id=config.id
     )
 
-    config_blob = example_config_blob.model_dump(exclude={"guardrails"})
+    config_blob = example_config_blob.model_dump()
     version_create = ConfigVersionCreate(
         config_blob=config_blob,
         commit_message="Updated model and parameters",
@@ -52,7 +50,7 @@ def test_create_version(db: Session, example_config_blob: ConfigBlob) -> None:
     assert version.deleted_at is None
 
 
-def test_create_version_with_guardrails_excludes_guardrails_from_blob(
+def test_create_version_with_guardrails_persists_validator_refs(
     db: Session,
 ) -> None:
     config = create_test_config(db)
@@ -71,22 +69,16 @@ def test_create_version_with_guardrails_persists_validator_refs(
             provider="openai-native",
             params={"model": "gpt-4"},
         ),
-        guardrails={
-            "input": [{"type": "pii_remover"}],
-            "output": [{"type": "gender_assumption_bias"}],
-        },
+        input_guardrails=[{"validator_config_id": 1}],
+        output_guardrails=[{"validator_config_id": 2}],
     ),
     commit_message="Guardrails version",
     )
 
-    with patch(
-        "app.crud.config.version.create_guardrails_validators_if_present"
-    ) as mock_create_guardrails:
-        version = version_crud.create_or_raise(version_create)
+    version = version_crud.create_or_raise(version_create)
 
-    mock_create_guardrails.assert_called_once()
-    assert "guardrails" not in version.config_blob
-    assert version.guardrails_config_id is not None
+    assert version.config_blob["input_guardrails"] == [{"validator_config_id": 1}]
+    assert version.config_blob["output_guardrails"] == [{"validator_config_id": 2}]
 
 
 def test_create_version_auto_increment(
@@ -156,9 +148,7 @@ def test_read_one_version(db: Session, example_config_blob: ConfigBlob) -> None:
     assert fetched_version.id == version.id
     assert fetched_version.version == version.version
     assert fetched_version.config_id == config.id
-    assert fetched_version.config_blob == example_config_blob.model_dump(
-        exclude={"guardrails"}
-    )
+    assert fetched_version.config_blob == example_config_blob.model_dump()
 
 
 def test_read_one_version_not_found(db: Session) -> None:
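The route and CRUD tests above all pin down the same contract: validator references ride along inside `config_blob` and come back verbatim, with no external create call at write time. A request sketch against the configs route, mirroring the payloads used in these tests (the name, ids, and API key are placeholders):

    import httpx

    payload = {
        "name": "assistant-config",
        "config_blob": {
            "completion": {"provider": "openai-native", "params": {"model": "gpt-4"}},
            "input_guardrails": [{"validator_config_id": 1}],
            "output_guardrails": [{"validator_config_id": 2}],
        },
        "commit_message": "Initial configuration",
    }
    # httpx.post(f"{base}{API_V1_STR}/configs/", json=payload,
    #            headers={"X-API-KEY": api_key})

diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py
index ec71cbc3..821f0857 100644
--- a/backend/app/tests/services/llm/test_guardrails.py
+++ b/backend/app/tests/services/llm/test_guardrails.py
@@ -4,8 +4,8 @@
 import httpx
 
 from app.core.config import settings
+from app.models.llm.request import Validator
 from app.services.llm.guardrails import (
-    create_guardrails_validators_if_present,
     create_validators_batch,
     get_validators_config,
     run_guardrails_validation,
@@ -76,6 +76,24 @@ def test_run_guardrails_validation_uses_settings(mock_client_cls) -> None: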
     )
 
 
+@patch("app.services.llm.guardrails.httpx.Client")
+def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) -> None:
+    mock_response = MagicMock()
+    mock_response.raise_for_status.return_value = None
+    mock_response.json.return_value = {"success": True}
+
+    mock_client = MagicMock()
+    mock_client.post.return_value = mock_response
+    mock_client_cls.return_value.__enter__.return_value = mock_client
+
+    run_guardrails_validation(
+        TEST_TEXT, [Validator(validator_config_id=123)], TEST_JOB_ID
+    )
+
+    _, kwargs = mock_client.post.call_args
+    assert kwargs["json"]["validators"] == [{"validator_config_id": 123}]
+
+
 @patch("app.services.llm.guardrails.httpx.Client")
 def test_create_validators_batch_success(mock_client_cls) -> None:
     validators = [{"stage": "input", "type": "pii_remover"}]
@@ -105,7 +123,11 @@ def test_create_validators_batch_success(mock_client_cls) -> None:
 
 @patch("app.services.llm.guardrails.httpx.Client")
 def test_get_validators_config_splits_input_output(mock_client_cls) -> None:
-    config_id = uuid.uuid4()
+    validator_configs = [
+        Validator(validator_config_id=5),
+        Validator(validator_config_id=6),
+        Validator(validator_config_id=7),
+    ]
 
     mock_response = MagicMock()
     mock_response.raise_for_status.return_value = None
@@ -123,11 +145,11 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None:
     }
 
     mock_client = MagicMock()
-    mock_client.get.return_value = mock_response
+    mock_client.post.return_value = mock_response
     mock_client_cls.return_value.__enter__.return_value = mock_client
 
     input_guardrails, output_guardrails = get_validators_config(
-        config_id=config_id,
+        validator_configs=validator_configs,
         organization_id=1,
         project_id=1,
     )
@@ -136,15 +158,18 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None:
     assert len(output_guardrails) == 1
     assert all(g["stage"] == "input" for g in input_guardrails)
     assert all(g["stage"] == "output" for g in output_guardrails)
+    _, kwargs = mock_client.post.call_args
+    assert kwargs["json"] == [v.model_dump() for v in validator_configs]
 
 
-@patch("app.services.llm.guardrails.create_validators_batch")
-def test_create_guardrails_validators_if_present_noop_on_none(mock_batch) -> None:
-    create_guardrails_validators_if_present(
-        guardrails=None,
-        guardrails_config_id=uuid.uuid4(),
+@patch("app.services.llm.guardrails.httpx.Client")
+def test_get_validators_config_empty_short_circuits_without_http(mock_client_cls) -> None:
+    input_guardrails, output_guardrails = get_validators_config(
+        validator_configs=[],
         organization_id=1,
         project_id=1,
     )
 
-    mock_batch.assert_not_called()
+    assert input_guardrails == []
+    assert output_guardrails == []
+    mock_client_cls.assert_not_called()
diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py
index 1f9a5dd9..4b3cd2ad 100644
--- a/backend/app/tests/services/llm/test_jobs.py
+++ b/backend/app/tests/services/llm/test_jobs.py
@@ -735,9 +735,10 @@ def test_guardrails_sanitize_input_before_provider(
 
         unsafe_input = "My credit card is 4111 1111 1111 1111"
 
-        with patch(
-            "app.services.llm.jobs.run_guardrails_validation"
-        ) as mock_guardrails:
+        with (
+            patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,
+            patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs,
+        ):
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -747,6 +747,10 @@ def test_guardrails_sanitize_input_before_provider(
                     "rephrase_needed": False,
                 },
             }
+            mock_fetch_configs.return_value = (
+                [{"type": "pii_remover", "stage": "input"}],
+                [],
+            )
 
             request_data = {
                 "query": {"input": unsafe_input},
@@ -755,10 +760,8 @@ def test_guardrails_sanitize_input_before_provider(
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
                         },
-                        "guardrails": {
-                            "input": [{"type": "pii_remover"}],
-                            "output": [],
-                        },
+                        "input_guardrails": [{"validator_config_id": 1}],
+                        "output_guardrails": [],
                     }
                 },
                 "include_provider_raw_response": False,
                 "callback_url": None,
             }
             result = self._execute_job(job_for_execution, db, request_data)
 
             provider_query = env["provider"].execute.call_args[0][1]
             assert "[REDACTED]" in provider_query.input
@@ -780,9 +783,10 @@ def test_guardrails_sanitize_output_after_provider(
         env["mock_llm_response"].response.output.text = "Aadhar no 123-45-6789"
         env["provider"].execute.return_value = (env["mock_llm_response"], None)
 
-        with patch(
-            "app.services.llm.jobs.run_guardrails_validation"
-        ) as mock_guardrails:
+        with (
+            patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,
+            patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs,
+        ):
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -791,6 +795,10 @@ def test_guardrails_sanitize_output_after_provider(
                     "rephrase_needed": False,
                 },
             }
+            mock_fetch_configs.return_value = (
+                [],
+                [{"type": "pii_remover", "stage": "output"}],
+            )
 
             request_data = {
                 "query": {"input": "hello"},
@@ -800,10 +808,8 @@ def test_guardrails_sanitize_output_after_provider(
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
                         },
-                        "guardrails": {
-                            "input": [],
-                            "output": [{"type": "pii_remover"}],
-                        },
+                        "input_guardrails": [],
+                        "output_guardrails": [{"validator_config_id": 2}],
                     }
                 },
             }
             result = self._execute_job(job_for_execution, db, request_data)
 
             assert "REDACTED" in result["data"]["response"]["output"]["text"]
@@ -820,9 +826,10 @@ def test_guardrails_bypass_does_not_modify_input(
 
         unsafe_input = "4111 1111 1111 1111"
 
-        with patch(
-            "app.services.llm.jobs.run_guardrails_validation"
-        ) as mock_guardrails:
+        with (
+            patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,
+            patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs,
+        ):
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": True,
@@ -831,6 +838,10 @@ def test_guardrails_bypass_does_not_modify_input(
                     "rephrase_needed": False,
                 },
             }
+            mock_fetch_configs.return_value = (
+                [{"type": "pii_remover", "stage": "input"}],
+                [],
+            )
 
             request_data = {
                 "query": {"input": unsafe_input},
@@ -840,10 +851,8 @@ def test_guardrails_bypass_does_not_modify_input(
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
                         },
-                        "guardrails": {
-                            "input": [{"type": "pii_remover"}],
-                            "output": [],
-                        },
+                        "input_guardrails": [{"validator_config_id": 1}],
+                        "output_guardrails": [],
                     }
                 },
             }
             self._execute_job(job_for_execution, db, request_data)
 
             provider_query = env["provider"].execute.call_args[0][1]
             assert provider_query.input == unsafe_input
@@ -857,13 +866,18 @@ def test_guardrails_validation_failure_blocks_job(
     ):
         env = job_env
 
-        with patch(
-            "app.services.llm.jobs.run_guardrails_validation"
-        ) as mock_guardrails:
+        with (
+            patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,
+            patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs,
+        ):
             mock_guardrails.return_value = {
                 "success": False,
                 "error": "Unsafe content detected",
             }
+            mock_fetch_configs.return_value = (
+                [{"type": "uli_slur_match", "stage": "input"}],
+                [],
+            )
 
             request_data = {
                 "query": {"input": "bad input"},
@@ -873,10 +887,8 @@ def test_guardrails_validation_failure_blocks_job(
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
                         },
-                        "guardrails": {
-                            "input": [{"type": "uli_slur_match"}],
-                            "output": [],
-                        },
+                        "input_guardrails": [{"validator_config_id": 1}],
+                        "output_guardrails": [],
                     }
                 },
             }
             result = self._execute_job(job_for_execution, db, request_data)
 
             assert not result["success"]
             assert "Unsafe content" in result["error"]
@@ -891,9 +903,10 @@ def test_guardrails_rephrase_needed_blocks_job(
     ):
         env = job_env
 
-        with patch(
-            "app.services.llm.jobs.run_guardrails_validation"
-        ) as mock_guardrails:
+        with (
+            patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,
+            patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs,
+        ):
             mock_guardrails.return_value = {
                 "success": True,
                 "bypassed": False,
@@ -902,6 +915,10 @@ def test_guardrails_rephrase_needed_blocks_job(
                     "rephrase_needed": True,
                 },
             }
+            mock_fetch_configs.return_value = (
+                [{"type": "policy", "stage": "input"}],
+                [],
+            )
 
             request_data = {
                 "query": {"input": "unsafe text"},
@@ -911,10 +928,8 @@ def test_guardrails_rephrase_needed_blocks_job(
                             "provider": "openai-native",
                             "params": {"model": "gpt-4"},
                         },
-                        "guardrails": {
-                            "input": [{"type": "policy"}],
-                            "output": [],
-                        },
+                        "input_guardrails": [{"validator_config_id": 1}],
+                        "output_guardrails": [],
                     }
                 },
             }
             result = self._execute_job(job_for_execution, db, request_data)
 
             assert not result["success"]
             env["provider"].execute.assert_not_called()
 
+    def test_execute_job_fetches_validator_configs_from_blob_refs(
+        self, db, job_env, job_for_execution
+    ):
+        env = job_env
+        env["provider"].execute.return_value = (env["mock_llm_response"], None)
+
+        with patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs:
+            mock_fetch_configs.return_value = ([], [])
+
+            request_data = {
+                "query": {"input": "hello"},
+                "config": {
+                    "blob": {
+                        "completion": {
+                            "provider": "openai-native",
+                            "params": {"model": "gpt-4"},
+                        },
+                        "input_guardrails": [{"validator_config_id": 1}],
+                        "output_guardrails": [{"validator_config_id": 2}],
+                    }
+                },
+            }
+            result = self._execute_job(job_for_execution, db, request_data)
+
+        assert result["success"]
+        mock_fetch_configs.assert_called_once()
+        _, kwargs = mock_fetch_configs.call_args
+        validator_configs = kwargs["validator_configs"]
+        assert [v.validator_config_id for v in validator_configs] == [1, 2]
+
 
 class TestResolveConfigBlob:
     """Test suite for resolve_config_blob function."""
@@ -953,44 +998,34 @@ def test_resolve_config_blob_success(self, db: Session):
         assert resolved_blob.completion.params["model"] == "gpt-4"
         assert resolved_blob.completion.params["temperature"] == 0.8
 
-    def test_resolve_config_blob_fetches_guardrails_by_guardrails_config_id(
-        self, db: Session
-    ):
+    def test_resolve_config_blob_keeps_validator_refs(self, db: Session):
         project = get_project(db)
-        config = create_test_config(db, project_id=project.id)
-        db.commit()
-
-        statement = select(ConfigVersion).where(
-            (ConfigVersion.config_id == config.id) & (ConfigVersion.version == 1)
+        config_blob = ConfigBlob(
+            completion=NativeCompletionConfig(
+                provider="openai-native",
+                params={"model": "gpt-4"},
+            ),
+            input_guardrails=[{"validator_config_id": 1}],
+            output_guardrails=[{"validator_config_id": 2}],
         )
-        config_version = db.exec(statement).one()
+        config = create_test_config(db, project_id=project.id, config_blob=config_blob)
+        db.commit()
 
         config_crud = ConfigVersionCrud(
             session=db, project_id=project.id, config_id=config.id, organization_id=1
         )
         llm_call_config = LLMCallConfig(id=str(config.id), version=1)
 
-        with patch("app.services.llm.jobs.get_validators_config") as mock_fetch:
-            mock_fetch.return_value = (
-                [{"type": "pii_remover", "stage": "input"}],
-                [{"type": "gender_assumption_bias", "stage": "output"}],
-            )
-            resolved_blob, error = resolve_config_blob(config_crud, llm_call_config)
+        resolved_blob, error = resolve_config_blob(config_crud, llm_call_config)
 
         assert error is None
         assert resolved_blob is not None
-        assert resolved_blob.guardrails is not None
-        assert resolved_blob.guardrails.input == [
-            {"type": "pii_remover", "stage": "input"}
+        assert [v.model_dump() for v in (resolved_blob.input_guardrails or [])] == [
+            {"validator_config_id": 1}
         ]
-        assert resolved_blob.guardrails.output == [
-            {"type": "gender_assumption_bias", "stage": "output"}
+        assert [v.model_dump() for v in (resolved_blob.output_guardrails or [])] == [
+            {"validator_config_id": 2}
        ]
-        mock_fetch.assert_called_once_with(
-            config_id=config_version.guardrails_config_id,
-            organization_id=1,
-            project_id=project.id,
-        )
 
     def test_resolve_config_blob_version_not_found(self, db: Session):
         """Test resolve_config_blob when version doesn't exist."""
         project = get_project(db)
From 744658e1a35b61d21c985fbeb192ef4c8c1b975c Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Mon, 16 Feb 2026 14:32:40 +0530
Subject: [PATCH 11/35] removed redundant changes

---
 backend/app/services/llm/guardrails.py | 75 --------------------------
 1 file changed, 75 deletions(-)

diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py
index 78da9c5f..af398401 100644
--- a/backend/app/services/llm/guardrails.py
+++ b/backend/app/services/llm/guardrails.py
@@ -65,81 +65,6 @@ def run_guardrails_validation(
             },
         }
 
-
-def create_validators_batch(
-    validators: list[dict[str, Any]],
-    config_id: UUID | None,
-    organization_id: int | None,
-    project_id: int | None,
-) -> list[dict[str, Any]]:
-    """
-    Batch create validator configs via Kaapi Guardrails service.
-
-    Args:
-        validators: List of validator creation payloads
-        config_id: Optional config UUID associated with this batch
-
-    Returns:
-        List of created validator objects (includes UUIDs)
-    """
-
-    headers = {
-        "accept": "application/json",
-        "Authorization": f"Bearer {settings.KAAPI_GUARDRAILS_AUTH}",
-        "Content-Type": "application/json",
-    }
-
-    try:
-        payload: dict[str, Any] | list[dict]
-
-        if config_id is None:
-            raise ValueError("config_id must be provided")
-
-        payload = {
-            "config_id": str(config_id) if config_id is not None else None,
-            "validators": validators,
-        }
-
-        logger.info(
-            "[create_validators_batch] Requesting validator batch creation. "
-            f"config_id={config_id}, organization_id={organization_id}, "
-            f"project_id={project_id}, validators_count={len(validators)}"
-        )
-
-        with httpx.Client(timeout=10.0) as client:
-            response = client.post(
-                f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/batch",
-                params={
-                    "organization_id": organization_id,
-                    "project_id": project_id,
-                },
-                json=payload,
-                headers=headers,
-            )
-
-            response.raise_for_status()
-
-            data = response.json()
-            if not isinstance(data, dict):
-                raise ValueError(
-                    "Invalid response format from guardrails service: expected object."
-                )
-
-            validators_data = data.get("data")
-            if not isinstance(validators_data, list):
-                raise ValueError(
-                    "Invalid response format from guardrails service: `data` must be a list."
-                )
-
-            return validators_data
-
-    except Exception as e:
-        logger.error(
-            f"[create_validators_batch] Failed to create validators. error={e}"
-        )
-        raise
-
 def get_validators_config(
     validator_configs: list[Validator] | None,
     organization_id: int | None,

From 11ddb5834264050985c00d4d587438bc1741289e Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Mon, 16 Feb 2026 14:52:17 +0530
Subject: [PATCH 12/35] removed redundant code

---
 backend/app/api/routes/config/config.py       |  4 +--
 backend/app/api/routes/config/version.py      |  6 ++---
 backend/app/crud/config/config.py             |  5 ++--
 backend/app/crud/config/version.py            |  9 +------
 backend/app/models/config/version.py          |  2 +-
 backend/app/services/llm/jobs.py              | 10 ++------
 .../tests/api/routes/configs/test_config.py   |  1 +
 .../tests/api/routes/configs/test_version.py  | 25 ++++++++++++++-----
 backend/app/tests/crud/config/test_config.py  |  7 ++----
 backend/app/tests/crud/config/test_version.py | 25 +++++++------------
 10 files changed, 41 insertions(+), 53 deletions(-)

diff --git a/backend/app/api/routes/config/config.py b/backend/app/api/routes/config/config.py
index 478505ce..6d262944 100644
--- a/backend/app/api/routes/config/config.py
+++ b/backend/app/api/routes/config/config.py
@@ -33,9 +33,7 @@ def create_config(
     """
     create new config along with initial version
     """
-    project_id = current_user.project_.id
-    organization_id = current_user.organization_.id
-    config_crud = ConfigCrud(session, project_id, organization_id)
+    config_crud = ConfigCrud(session=session, project_id=current_user.project_.id)
 
     config, version = config_crud.create_or_raise(config_create)
     response = ConfigWithVersion(**config.model_dump(), version=version)
diff --git a/backend/app/api/routes/config/version.py b/backend/app/api/routes/config/version.py
index 5871c20b..fd5e057f 100644
--- a/backend/app/api/routes/config/version.py
+++ b/backend/app/api/routes/config/version.py
@@ -35,9 +35,9 @@ def create_version(
     Provider, model, and params can be changed.
     Type is inherited from existing config and cannot be changed.
     """
-    project_id = current_user.project_.id
-    organization_id = current_user.organization_.id
-    version_crud = ConfigVersionCrud(session, config_id, project_id, organization_id)
+    version_crud = ConfigVersionCrud(
+        session=session, project_id=current_user.project_.id, config_id=config_id
+    )
 
     version = version_crud.create_or_raise(version_create=version_create)
     return APIResponse.success_response(
diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py
index e3288e19..b644aa55 100644
--- a/backend/app/crud/config/config.py
+++ b/backend/app/crud/config/config.py
@@ -1,6 +1,6 @@
 import logging
 from uuid import UUID
-from typing import Optional, Tuple
+from typing import Tuple
 
 from sqlmodel import Session, select, and_
 from fastapi import HTTPException
@@ -22,11 +22,10 @@ class ConfigCrud:
     CRUD operations for configurations scoped to a project.
     """
 
     def __init__(
-        self, session: Session, project_id: int, organization_id: Optional[int] = None
+        self, session: Session, project_id: int
     ):
         self.session = session
         self.project_id = project_id
-        self.organization_id = organization_id
 
     def create_or_raise(
         self, config_create: ConfigCreate
diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py
index 80649f63..12279f37 100644
--- a/backend/app/crud/config/version.py
+++ b/backend/app/crud/config/version.py
@@ -27,17 +27,10 @@ class ConfigVersionCrud:
     CRUD operations for configuration versions scoped to a project.
     """
 
-    def __init__(
-        self,
-        session: Session,
-        config_id: UUID,
-        project_id: int,
-        organization_id: Optional[int] = None,
-    ):
+    def __init__(self, session: Session, config_id: UUID, project_id: int):
         self.session = session
         self.project_id = project_id
         self.config_id = config_id
-        self.organization_id = organization_id
 
     def create_or_raise(self, version_create: ConfigVersionUpdate) -> ConfigVersion:
         """
diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py
index 188cc39e..8c62c469 100644
--- a/backend/app/models/config/version.py
+++ b/backend/app/models/config/version.py
@@ -136,4 +136,4 @@ class ConfigVersionItems(SQLModel):
         description="Optional message describing the changes in this version",
     )
     inserted_at: datetime
-    updated_at: datetime
\ No newline at end of file
+    updated_at: datetime
diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py
index b7866b79..359c2826 100644
--- a/backend/app/services/llm/jobs.py
+++ b/backend/app/services/llm/jobs.py
@@ -109,8 +109,6 @@ def resolve_config_blob(
     """
     try:
         config_version = config_crud.exists_or_raise(version_number=config.version)
-        config_blob_data = dict(config_version.config_blob)
-
     except HTTPException as e:
         return None, f"Failed to retrieve stored configuration: {e.detail}"
     except Exception:
@@ -122,8 +120,7 @@ def resolve_config_blob(
         return None, "Unexpected error occurred while retrieving stored configuration"
 
     try:
-        config_blob, error = ConfigBlob(**config_blob_data), None
-        return config_blob, error
+        return ConfigBlob(**config_version.config_blob), None
     except (TypeError, ValueError) as e:
         return None, f"Stored configuration blob is invalid: {str(e)}"
     except Exception:
@@ -192,10 +189,7 @@ def execute_job(
     # if stored config, fetch blob from DB
     if config.is_stored_config:
         config_crud = ConfigVersionCrud(
-            session=session,
-            project_id=project_id,
-            config_id=config.id,
-            organization_id=organization_id,
+            session=session, project_id=project_id, config_id=config.id
         )
 
         # blob is dynamic, need to resolve to ConfigBlob format
diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py
index 6f81908e..0db7e131 100644
--- a/backend/app/tests/api/routes/configs/test_config.py
+++ b/backend/app/tests/api/routes/configs/test_config.py
@@ -68,6 +68,7 @@ def test_create_config_with_guardrails_persists_validator_refs(
         "config_blob": {
             "completion": {
                 "provider": "openai-native",
+                "type": "text",
                 "params": {"model": "gpt-4"},
             },
             "input_guardrails": [{"validator_config_id": 1}],
diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py
index 1d9a0aca..81e62372 100644
--- a/backend/app/tests/api/routes/configs/test_version.py
+++ b/backend/app/tests/api/routes/configs/test_version.py
@@ -51,9 +51,15 @@ def test_create_version_success(
     assert (
         data["data"]["version"] == 2
     )  # First version created with config, this is second
-    assert data["data"]["config_blob"]["completion"] == version_data["config_blob"][
-        "completion"
-    ]
+    assert data["data"]["config_blob"]["completion"]["provider"] == "openai-native"
+    assert data["data"]["config_blob"]["completion"]["type"] == "text"
+    assert (
+        data["data"]["config_blob"]["completion"]["params"]["model"] == "gpt-4-turbo"
+    )
+    assert (
+        data["data"]["config_blob"]["completion"]["params"]["temperature"] == 0.9
+    )
+    assert data["data"]["config_blob"]["completion"]["params"]["max_tokens"] == 3000
     assert data["data"]["config_blob"]["input_guardrails"] is None
     assert data["data"]["config_blob"]["output_guardrails"] is None
     assert data["data"]["commit_message"] == version_data["commit_message"]
@@ -85,6 +85,7 @@ def test_create_version_with_guardrails_persists_validator_refs(
         "config_blob": {
             "completion": {
                 "provider": "openai-native",
+                "type": "text",
                 "params": {"model": "gpt-4-turbo"},
             },
             "input_guardrails": [{"validator_config_id": 1}],
@@ -104,12 +111,12 @@ def test_create_version_with_guardrails_persists_validator_refs(
     ]
 
 
-def test_create_version_empty_blob_fails(
+def test_create_version_empty_blob_creates_noop_version(
     db: Session,
     client: TestClient,
     user_api_key: TestAuthContext,
 ) -> None:
-    """Test that creating a version with empty config_blob fails validation."""
+    """Empty partial update still creates a new version by inheriting previous blob."""
     config = create_test_config(
         db=db,
         project_id=user_api_key.project_id,
@@ -126,7 +133,10 @@ def test_create_version_empty_blob_creates_noop_version(
         headers={"X-API-KEY": user_api_key.key},
         json=version_data,
     )
-    assert response.status_code == 422
+    assert response.status_code == 201
+    data = response.json()
+    assert data["success"] is True
+    assert data["data"]["version"] == 2
 
 
 def test_create_version_nonexistent_config(
@@ -140,6 +150,7 @@ def test_create_version_nonexistent_config(
         "config_blob": {
             "completion": {
                 "provider": "openai",
+                "type": "text",
                 "params": {"model": "gpt-4"},
             }
         },
@@ -171,6 +182,7 @@ def test_create_version_different_project_fails(
         "config_blob": {
             "completion": {
                 "provider": "openai",
+                "type": "text",
                 "params": {"model": "gpt-4"},
             }
         },
@@ -203,6 +215,7 @@ def test_create_version_auto_increments(
             "config_blob": {
                 "completion": {
                     "provider": "openai",
+                    "type": "text",
                     "params": {"model": f"gpt-4-version-{i}"},
                 }
             },
diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py
index 107c2f46..fe0caed3 100644
--- a/backend/app/tests/crud/config/test_config.py
+++ b/backend/app/tests/crud/config/test_config.py
@@ -63,11 +63,7 @@ def test_create_config_with_guardrails_persists_validator_refs(
     db: Session,
 ) -> None:
     project = create_test_project(db)
-    config_crud = ConfigCrud(
-        session=db,
-        project_id=project.id,
-        organization_id=project.organization_id,
-    )
+    config_crud = ConfigCrud(session=db, project_id=project.id)
 
     config_create = ConfigCreate(
         name=f"test-config-{random_lower_string()}",
@@ -75,6 +71,7 @@ def test_create_config_with_guardrails_persists_validator_refs(
         config_blob=ConfigBlob(
             completion=NativeCompletionConfig(
                 provider="openai-native",
+                type="text",
                 params={"model": "gpt-4"},
             ),
             input_guardrails=[{"validator_config_id": 1}],
diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py
index 4606a82f..dc983c47 100644
--- a/backend/app/tests/crud/config/test_version.py
+++ b/backend/app/tests/crud/config/test_version.py
@@ -3,7 +3,7 @@
 from sqlmodel import Session
 from fastapi import HTTPException
 
-from app.models import ConfigVersionCreate, ConfigBlob, Project
+from app.models import ConfigVersionUpdate, ConfigBlob
 from app.models.llm.request import NativeCompletionConfig
 from app.crud.config import ConfigVersionCrud
 from app.tests.utils.test_data import (
@@ -55,28 +55,21 @@ def test_create_version_with_guardrails_persists_validator_refs(
     db: Session,
 ) -> None:
     config = create_test_config(db)
-    project = db.get(Project, config.project_id)
-    assert project is not None
     version_crud = ConfigVersionCrud(
         session=db,
         project_id=config.project_id,
         config_id=config.id,
-        organization_id=project.organization_id,
-    )
-
-    version_create = ConfigVersionCreate(
-        config_blob=ConfigBlob(
-            completion=NativeCompletionConfig(
-                provider="openai-native",
-                params={"model": "gpt-4"},
-            ),
-            input_guardrails=[{"validator_config_id": 1}],
-            output_guardrails=[{"validator_config_id": 2}],
-        ),
+    )
+
+    version_update = ConfigVersionUpdate(
+        config_blob={
+            "input_guardrails": [{"validator_config_id": 1}],
+            "output_guardrails": [{"validator_config_id": 2}],
+        },
         commit_message="Guardrails version",
     )
 
-    version = version_crud.create_or_raise(version_create)
+    version = version_crud.create_or_raise(version_update)
 
     assert version.config_blob["input_guardrails"] == [{"validator_config_id": 1}]
     assert version.config_blob["output_guardrails"] == [{"validator_config_id": 2}]

From 67f615ed0b39d3993e4bd44b9f802472ea13bb8b Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Mon, 16 Feb 2026 14:53:03 +0530
Subject: [PATCH 13/35] updated code

---
 backend/app/crud/config/config.py  | 4 +---
 backend/app/crud/config/version.py | 1 -
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py
index b644aa55..69d4bced 100644
--- a/backend/app/crud/config/config.py
+++ b/backend/app/crud/config/config.py
@@ -21,9 +21,7 @@ class ConfigCrud:
     CRUD operations for configurations scoped to a project.
     """
 
-    def __init__(
-        self, session: Session, project_id: int
-    ):
+    def __init__(self, session: Session, project_id: int):
         self.session = session
         self.project_id = project_id
 
diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py
index 12279f37..915d1b18 100644
--- a/backend/app/crud/config/version.py
+++ b/backend/app/crud/config/version.py
@@ -1,5 +1,4 @@
 import logging
-from typing import Optional
 from uuid import UUID
 from typing import Any
 

From c82bdb53e4b9529b16bdffab358857db8a076465 Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Mon, 16 Feb 2026 15:06:13 +0530
Subject: [PATCH 14/35] updated tests

---
 .../tests/api/routes/configs/test_config.py  |  1 +
 .../tests/api/routes/configs/test_version.py | 71 ++++++-----------
 .../app/tests/services/llm/test_guardrails.py | 28 --------
 3 files changed, 31 insertions(+), 69 deletions(-)

diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py
index 0db7e131..b4053b4c 100644
--- a/backend/app/tests/api/routes/configs/test_config.py
+++ b/backend/app/tests/api/routes/configs/test_config.py
@@ -1,4 +1,5 @@
 from uuid import uuid4
+
 from fastapi.testclient import TestClient
 from sqlmodel import Session
 
diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py
index 81e62372..3a47ff99 100644
--- a/backend/app/tests/api/routes/configs/test_version.py
+++ b/backend/app/tests/api/routes/configs/test_version.py
@@ -1,4 +1,5 @@
 from uuid import uuid4
+
 from fastapi.testclient import TestClient
 from sqlmodel import Session
 
@@ -51,17 +52,6 @@ def test_create_version_success(
     assert (
         data["data"]["version"] == 2
     )  # First version created with config, this is second
-    assert data["data"]["config_blob"]["completion"]["provider"] == "openai-native"
-    assert data["data"]["config_blob"]["completion"]["type"] == "text"
-    assert (
-        data["data"]["config_blob"]["completion"]["params"]["model"] == "gpt-4-turbo"
-    )
-    assert (
-        data["data"]["config_blob"]["completion"]["params"]["temperature"] ==
0.9 - ) - assert data["data"]["config_blob"]["completion"]["params"]["max_tokens"] == 3000 - assert data["data"]["config_blob"]["input_guardrails"] is None - assert data["data"]["config_blob"]["output_guardrails"] is None assert data["data"]["commit_message"] == version_data["commit_message"] assert data["data"]["config_id"] == str(config.id) @@ -70,6 +60,34 @@ def test_create_version_success( assert config_blob["completion"]["params"]["model"] == "gpt-4-turbo" assert config_blob["completion"]["params"]["temperature"] == 0.9 + # Verify type was inherited from existing config + assert config_blob["completion"]["type"] == "text" + + +def test_create_version_nonexistent_config( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test creating a version for a non-existent config returns 404.""" + fake_uuid = uuid4() + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "params": {"model": "gpt-4"}, + } + }, + "commit_message": "Test", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{fake_uuid}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 404 + def test_create_version_with_guardrails_persists_validator_refs( db: Session, client: TestClient, @@ -138,33 +156,6 @@ def test_create_version_empty_blob_creates_noop_version( assert data["success"] is True assert data["data"]["version"] == 2 - -def test_create_version_nonexistent_config( - db: Session, - client: TestClient, - user_api_key: TestAuthContext, -) -> None: - """Test creating a version for a non-existent config returns 404.""" - fake_uuid = uuid4() - version_data = { - "config_blob": { - "completion": { - "provider": "openai", - "type": "text", - "params": {"model": "gpt-4"}, - } - }, - "commit_message": "Test", - } - - response = client.post( - f"{settings.API_V1_STR}/configs/{fake_uuid}/versions", - headers={"X-API-KEY": user_api_key.key}, - json=version_data, - ) - assert response.status_code == 404 - - def test_create_version_different_project_fails( db: Session, client: TestClient, @@ -182,7 +173,6 @@ def test_create_version_different_project_fails( "config_blob": { "completion": { "provider": "openai", - "type": "text", "params": {"model": "gpt-4"}, } }, @@ -215,7 +205,6 @@ def test_create_version_auto_increments( "config_blob": { "completion": { "provider": "openai", - "type": "text", "params": {"model": f"gpt-4-version-{i}"}, } }, @@ -953,4 +942,4 @@ def test_create_version_with_kaapi_tts_provider_success( assert data["success"] is True assert data["data"]["version"] == 2 assert data["data"]["config_blob"]["completion"]["provider"] == "openai" - assert data["data"]["config_blob"]["completion"]["type"] == "tts" + assert data["data"]["config_blob"]["completion"]["type"] == "tts" \ No newline at end of file diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 821f0857..b7ff245f 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -6,7 +6,6 @@ from app.core.config import settings from app.models.llm.request import Validator from app.services.llm.guardrails import ( - create_validators_batch, get_validators_config, run_guardrails_validation, ) @@ -94,33 +93,6 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) assert kwargs["json"]["validators"] == [{"validator_config_id": 123}] -@patch("app.services.llm.guardrails.httpx.Client") 
-def test_create_validators_batch_success(mock_client_cls) -> None: - validators = [{"stage": "input", "type": "pii_remover"}] - config_id = uuid.uuid4() - - mock_response = MagicMock() - mock_response.raise_for_status.return_value = None - mock_response.json.return_value = {"success": True, "data": [{"id": "v1"}]} - - mock_client = MagicMock() - mock_client.post.return_value = mock_response - mock_client_cls.return_value.__enter__.return_value = mock_client - - result = create_validators_batch( - validators=validators, - config_id=config_id, - organization_id=1, - project_id=2, - ) - - assert result == [{"id": "v1"}] - _, kwargs = mock_client.post.call_args - assert kwargs["json"]["config_id"] == str(config_id) - assert kwargs["json"]["validators"] == validators - assert kwargs["params"] == {"organization_id": 1, "project_id": 2} - - @patch("app.services.llm.guardrails.httpx.Client") def test_get_validators_config_splits_input_output(mock_client_cls) -> None: validator_configs = [ From 8b235a823cae476817799ee77e0debf09658c64f Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 15:07:36 +0530 Subject: [PATCH 15/35] updates --- backend/app/tests/api/routes/configs/test_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 3a47ff99..b8fdb848 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -942,4 +942,4 @@ def test_create_version_with_kaapi_tts_provider_success( assert data["success"] is True assert data["data"]["version"] == 2 assert data["data"]["config_blob"]["completion"]["provider"] == "openai" - assert data["data"]["config_blob"]["completion"]["type"] == "tts" \ No newline at end of file + assert data["data"]["config_blob"]["completion"]["type"] == "tts" From ea91f811b13b2c928456c91da1da37d7f992716a Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 15:11:19 +0530 Subject: [PATCH 16/35] precommit --- backend/app/models/llm/request.py | 1 + backend/app/services/llm/guardrails.py | 2 +- backend/app/services/llm/jobs.py | 4 +++- backend/app/tests/api/routes/configs/test_config.py | 2 ++ backend/app/tests/api/routes/configs/test_version.py | 2 ++ backend/app/tests/services/llm/test_guardrails.py | 4 +++- 6 files changed, 12 insertions(+), 3 deletions(-) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index 613c92b1..a14e0c9d 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -209,6 +209,7 @@ def validate_params(self): Field(discriminator="provider"), ] + class Validator(SQLModel): validator_config_id: int = Field(..., description="Validator config") diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index af398401..458809eb 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -65,6 +65,7 @@ def run_guardrails_validation( }, } + def get_validators_config( validator_configs: list[Validator] | None, organization_id: int | None, @@ -137,4 +138,3 @@ def get_validators_config( f"endpoint={endpoint}, error={e}" ) raise - diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index 359c2826..fdf8b829 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -367,7 +367,9 @@ def execute_job( if response: if output_guardrails: output_text = 
response.response.output.content.value - safe_output = run_guardrails_validation(output_text, output_guardrails, job_id) + safe_output = run_guardrails_validation( + output_text, output_guardrails, job_id + ) logger.info( f"[execute_job] Output guardrail validation | success={safe_output['success']}." diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index b4053b4c..6cfc9d51 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -58,6 +58,7 @@ def test_create_config_success( == 0.8 ) + def test_create_config_with_guardrails_persists_validator_refs( db: Session, client: TestClient, @@ -94,6 +95,7 @@ def test_create_config_with_guardrails_persists_validator_refs( {"validator_config_id": 2} ] + def test_create_config_empty_blob_fails( db: Session, client: TestClient, diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index b8fdb848..7acba795 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -88,6 +88,7 @@ def test_create_version_nonexistent_config( ) assert response.status_code == 404 + def test_create_version_with_guardrails_persists_validator_refs( db: Session, client: TestClient, @@ -156,6 +157,7 @@ def test_create_version_empty_blob_creates_noop_version( assert data["success"] is True assert data["data"]["version"] == 2 + def test_create_version_different_project_fails( db: Session, client: TestClient, diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index b7ff245f..2fd18812 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -135,7 +135,9 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: @patch("app.services.llm.guardrails.httpx.Client") -def test_get_validators_config_empty_short_circuits_without_http(mock_client_cls) -> None: +def test_get_validators_config_empty_short_circuits_without_http( + mock_client_cls, +) -> None: input_guardrails, output_guardrails = get_validators_config( validator_configs=[], organization_id=1, From c62e61966b35d94e6d42a6456a7a2e6ca10669d0 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 16:40:04 +0530 Subject: [PATCH 17/35] updated schema --- backend/app/models/llm/request.py | 9 ++---- backend/app/services/llm/guardrails.py | 29 +++++++++---------- backend/app/services/llm/jobs.py | 15 ++++------ .../app/tests/services/llm/test_guardrails.py | 23 +++++++-------- 4 files changed, 31 insertions(+), 45 deletions(-) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index a14e0c9d..f3566d30 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -209,22 +209,17 @@ def validate_params(self): Field(discriminator="provider"), ] - -class Validator(SQLModel): - validator_config_id: int = Field(..., description="Validator config") - - class ConfigBlob(SQLModel): """Raw JSON blob of config.""" completion: CompletionConfig = Field(..., description="Completion configuration") - input_guardrails: list[Validator] | None = Field( + input_guardrails: list[UUID] | None = Field( default=None, description="Guardrails applied to validate/sanitize the input before the LLM call", ) - output_guardrails: list[Validator] | None = Field( + 
output_guardrails: list[UUID] | None = Field( default=None, description="Guardrails applied to validate/sanitize the output after the LLM call", ) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 458809eb..68d973a0 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -5,13 +5,12 @@ import httpx from app.core.config import settings -from app.models.llm.request import Validator logger = logging.getLogger(__name__) def run_guardrails_validation( - input_text: str, guardrail_config: list[dict[str, Any] | Validator], job_id: UUID + input_text: str, guardrail_config: list[dict[str, Any]], job_id: UUID ) -> dict[str, Any]: """ Call the Kaapi guardrails service to validate and process input text. @@ -24,15 +23,10 @@ def run_guardrails_validation( Returns: JSON response from the guardrails service with validation results. """ - validators_payload = [ - validator.model_dump() if isinstance(validator, Validator) else validator - for validator in guardrail_config - ] - payload = { "request_id": str(job_id), "input": input_text, - "validators": validators_payload, + "validators": guardrail_config, } headers = { @@ -67,17 +61,17 @@ def run_guardrails_validation( def get_validators_config( - validator_configs: list[Validator] | None, + validator_config_ids: list[UUID] | None, organization_id: int | None, project_id: int | None, ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: """ - Fetch validator configurations from batch payload and split by stage. + Fetch validator configurations by IDs and split by stage. Calls: - POST /validators/configs/batch/fetch?organization_id={organization_id}&project_id={project_id} + GET /validators/configs/?organization_id={organization_id}&project_id={project_id}&ids={uuid} """ - if not validator_configs: + if not validator_config_ids: return [], [] headers = { @@ -86,17 +80,20 @@ def get_validators_config( "Content-Type": "application/json", } - endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/batch/fetch" + endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/" try: with httpx.Client(timeout=10.0) as client: - response = client.post( + response = client.get( endpoint, params={ "organization_id": organization_id, "project_id": project_id, + "ids": [ + str(validator_config_id) + for validator_config_id in validator_config_ids + ], }, - json=[validator.model_dump() for validator in validator_configs], headers=headers, ) response.raise_for_status() @@ -134,7 +131,7 @@ def get_validators_config( except Exception as e: logger.error( "[get_validators_config] Failed to fetch validator config. 
" - f"validator_configs={validator_configs}, organization_id={organization_id}, project_id={project_id}, " + f"validator_config_ids={validator_config_ids}, organization_id={organization_id}, project_id={project_id}, " f"endpoint={endpoint}, error={e}" ) raise diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index fdf8b829..d5f44735 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -17,7 +17,6 @@ ConfigBlob, LLMCallConfig, KaapiCompletionConfig, - Validator, ) from app.services.llm.guardrails import get_validators_config, run_guardrails_validation from app.services.llm.providers.registry import get_llm_provider @@ -208,14 +207,14 @@ def execute_job( config_blob = config.blob if config_blob is not None: - validator_configs: list[Validator] = [ + validator_config_ids: list[UUID] = [ *(config_blob.input_guardrails or []), *(config_blob.output_guardrails or []), ] - if validator_configs: + if validator_config_ids: input_guardrails, output_guardrails = get_validators_config( - validator_configs=validator_configs, + validator_config_ids=validator_config_ids, organization_id=organization_id, project_id=project_id, ) @@ -381,13 +380,11 @@ def execute_job( ) elif safe_output["success"]: - response.response.output.content.value = safe_output["data"][ - "safe_text" - ] + response.response.output.content.value = safe_output["data"]["safe_text"] if safe_output["data"]["rephrase_needed"] == True: callback_response = APIResponse.failure_response( - error=request.query.input, + error=output_text, metadata=request.request_metadata, ) return handle_job_error( @@ -395,7 +392,7 @@ def execute_job( ) else: - response.response.output.text = safe_output["error"] + response.response.output.content.value = safe_output["error"] callback_response = APIResponse.failure_response( error=safe_output["error"], diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 2fd18812..de294fa3 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -4,7 +4,6 @@ import httpx from app.core.config import settings -from app.models.llm.request import Validator from app.services.llm.guardrails import ( get_validators_config, run_guardrails_validation, @@ -85,9 +84,7 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - run_guardrails_validation( - TEST_TEXT, [Validator(validator_config_id=123)], TEST_JOB_ID - ) + run_guardrails_validation(TEST_TEXT, [{"validator_config_id": 123}], TEST_JOB_ID) _, kwargs = mock_client.post.call_args assert kwargs["json"]["validators"] == [{"validator_config_id": 123}] @@ -95,10 +92,10 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) @patch("app.services.llm.guardrails.httpx.Client") def test_get_validators_config_splits_input_output(mock_client_cls) -> None: - validator_configs = [ - Validator(validator_config_id=5), - Validator(validator_config_id=6), - Validator(validator_config_id=7), + validator_config_ids = [ + uuid.uuid4(), + uuid.uuid4(), + uuid.uuid4(), ] mock_response = MagicMock() @@ -117,11 +114,11 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: } mock_client = MagicMock() - mock_client.post.return_value = mock_response + mock_client.get.return_value = mock_response 
mock_client_cls.return_value.__enter__.return_value = mock_client input_guardrails, output_guardrails = get_validators_config( - validator_configs=validator_configs, + validator_config_ids=validator_config_ids, organization_id=1, project_id=1, ) @@ -130,8 +127,8 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: assert len(output_guardrails) == 1 assert all(g["stage"] == "input" for g in input_guardrails) assert all(g["stage"] == "output" for g in output_guardrails) - _, kwargs = mock_client.post.call_args - assert kwargs["json"] == [v.model_dump() for v in validator_configs] + _, kwargs = mock_client.get.call_args + assert kwargs["params"]["ids"] == [str(v) for v in validator_config_ids] @patch("app.services.llm.guardrails.httpx.Client") @@ -139,7 +136,7 @@ def test_get_validators_config_empty_short_circuits_without_http( mock_client_cls, ) -> None: input_guardrails, output_guardrails = get_validators_config( - validator_configs=[], + validator_config_ids=[], organization_id=1, project_id=1, ) From 646a46fceed00365ee5dabbd5681b2239e532409 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 16:46:28 +0530 Subject: [PATCH 18/35] renamed to list_validators_config --- backend/app/models/llm/request.py | 1 + backend/app/services/llm/guardrails.py | 4 ++-- backend/app/services/llm/jobs.py | 11 ++++++++--- backend/app/tests/services/llm/test_guardrails.py | 10 +++++----- backend/app/tests/services/llm/test_jobs.py | 14 ++++++++------ 5 files changed, 24 insertions(+), 16 deletions(-) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index f3566d30..6b275d0b 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -209,6 +209,7 @@ def validate_params(self): Field(discriminator="provider"), ] + class ConfigBlob(SQLModel): """Raw JSON blob of config.""" diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 68d973a0..b8b16338 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -60,7 +60,7 @@ def run_guardrails_validation( } -def get_validators_config( +def list_validators_config( validator_config_ids: list[UUID] | None, organization_id: int | None, project_id: int | None, @@ -130,7 +130,7 @@ def get_validators_config( except Exception as e: logger.error( - "[get_validators_config] Failed to fetch validator config. " + "[list_validators_config] Failed to fetch validator config. 
" f"validator_config_ids={validator_config_ids}, organization_id={organization_id}, project_id={project_id}, " f"endpoint={endpoint}, error={e}" ) diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index d5f44735..b61254cd 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -18,7 +18,10 @@ LLMCallConfig, KaapiCompletionConfig, ) -from app.services.llm.guardrails import get_validators_config, run_guardrails_validation +from app.services.llm.guardrails import ( + list_validators_config, + run_guardrails_validation, +) from app.services.llm.providers.registry import get_llm_provider from app.services.llm.mappers import transform_kaapi_config_to_native from app.services.llm.input_resolver import resolve_input, cleanup_temp_file @@ -213,7 +216,7 @@ def execute_job( ] if validator_config_ids: - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=validator_config_ids, organization_id=organization_id, project_id=project_id, @@ -380,7 +383,9 @@ def execute_job( ) elif safe_output["success"]: - response.response.output.content.value = safe_output["data"]["safe_text"] + response.response.output.content.value = safe_output["data"][ + "safe_text" + ] if safe_output["data"]["rephrase_needed"] == True: callback_response = APIResponse.failure_response( diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index de294fa3..1eca1bc8 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -5,7 +5,7 @@ from app.core.config import settings from app.services.llm.guardrails import ( - get_validators_config, + list_validators_config, run_guardrails_validation, ) @@ -91,7 +91,7 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) @patch("app.services.llm.guardrails.httpx.Client") -def test_get_validators_config_splits_input_output(mock_client_cls) -> None: +def test_list_validators_config_splits_input_output(mock_client_cls) -> None: validator_config_ids = [ uuid.uuid4(), uuid.uuid4(), @@ -117,7 +117,7 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: mock_client.get.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=validator_config_ids, organization_id=1, project_id=1, @@ -132,10 +132,10 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: @patch("app.services.llm.guardrails.httpx.Client") -def test_get_validators_config_empty_short_circuits_without_http( +def test_list_validators_config_empty_short_circuits_without_http( mock_client_cls, ) -> None: - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=[], organization_id=1, project_id=1, diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 4dfc35f8..7f348e47 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -751,7 +751,7 @@ def test_guardrails_sanitize_input_before_provider( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - 
patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -800,7 +800,7 @@ def test_guardrails_sanitize_output_after_provider( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -844,7 +844,7 @@ def test_guardrails_bypass_does_not_modify_input( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -885,7 +885,7 @@ def test_guardrails_validation_failure_blocks_job( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": False, @@ -923,7 +923,7 @@ def test_guardrails_rephrase_needed_blocks_job( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -963,7 +963,9 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( env = job_env env["provider"].execute.return_value = (env["mock_llm_response"], None) - with patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs: + with patch( + "app.services.llm.jobs.list_validators_config" + ) as mock_fetch_configs: mock_fetch_configs.return_value = ([], []) request_data = { From a48ad897cb4d748150a82f47cd3df71d0e844b6c Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 16:50:48 +0530 Subject: [PATCH 19/35] renamed to list_validators_config --- backend/app/models/llm/request.py | 1 + backend/app/services/llm/guardrails.py | 4 ++-- backend/app/services/llm/jobs.py | 11 ++++++++--- backend/app/tests/services/llm/test_guardrails.py | 10 +++++----- backend/app/tests/services/llm/test_jobs.py | 14 ++++++++------ 5 files changed, 24 insertions(+), 16 deletions(-) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index f3566d30..6b275d0b 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -209,6 +209,7 @@ def validate_params(self): Field(discriminator="provider"), ] + class ConfigBlob(SQLModel): """Raw JSON blob of config.""" diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 68d973a0..b8b16338 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -60,7 +60,7 @@ def run_guardrails_validation( } -def get_validators_config( +def list_validators_config( validator_config_ids: list[UUID] | None, organization_id: int | None, project_id: int | None, @@ -130,7 +130,7 @@ def get_validators_config( except Exception as e: logger.error( - "[get_validators_config] Failed to fetch validator config. " + "[list_validators_config] Failed to fetch validator config. 
" f"validator_config_ids={validator_config_ids}, organization_id={organization_id}, project_id={project_id}, " f"endpoint={endpoint}, error={e}" ) diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index d5f44735..b61254cd 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -18,7 +18,10 @@ LLMCallConfig, KaapiCompletionConfig, ) -from app.services.llm.guardrails import get_validators_config, run_guardrails_validation +from app.services.llm.guardrails import ( + list_validators_config, + run_guardrails_validation, +) from app.services.llm.providers.registry import get_llm_provider from app.services.llm.mappers import transform_kaapi_config_to_native from app.services.llm.input_resolver import resolve_input, cleanup_temp_file @@ -213,7 +216,7 @@ def execute_job( ] if validator_config_ids: - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=validator_config_ids, organization_id=organization_id, project_id=project_id, @@ -380,7 +383,9 @@ def execute_job( ) elif safe_output["success"]: - response.response.output.content.value = safe_output["data"]["safe_text"] + response.response.output.content.value = safe_output["data"][ + "safe_text" + ] if safe_output["data"]["rephrase_needed"] == True: callback_response = APIResponse.failure_response( diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index de294fa3..1eca1bc8 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -5,7 +5,7 @@ from app.core.config import settings from app.services.llm.guardrails import ( - get_validators_config, + list_validators_config, run_guardrails_validation, ) @@ -91,7 +91,7 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) @patch("app.services.llm.guardrails.httpx.Client") -def test_get_validators_config_splits_input_output(mock_client_cls) -> None: +def test_list_validators_config_splits_input_output(mock_client_cls) -> None: validator_config_ids = [ uuid.uuid4(), uuid.uuid4(), @@ -117,7 +117,7 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: mock_client.get.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=validator_config_ids, organization_id=1, project_id=1, @@ -132,10 +132,10 @@ def test_get_validators_config_splits_input_output(mock_client_cls) -> None: @patch("app.services.llm.guardrails.httpx.Client") -def test_get_validators_config_empty_short_circuits_without_http( +def test_list_validators_config_empty_short_circuits_without_http( mock_client_cls, ) -> None: - input_guardrails, output_guardrails = get_validators_config( + input_guardrails, output_guardrails = list_validators_config( validator_config_ids=[], organization_id=1, project_id=1, diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 4dfc35f8..7f348e47 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -751,7 +751,7 @@ def test_guardrails_sanitize_input_before_provider( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - 
patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -800,7 +800,7 @@ def test_guardrails_sanitize_output_after_provider( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -844,7 +844,7 @@ def test_guardrails_bypass_does_not_modify_input( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -885,7 +885,7 @@ def test_guardrails_validation_failure_blocks_job( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": False, @@ -923,7 +923,7 @@ def test_guardrails_rephrase_needed_blocks_job( with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, - patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, ): mock_guardrails.return_value = { "success": True, @@ -963,7 +963,9 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( env = job_env env["provider"].execute.return_value = (env["mock_llm_response"], None) - with patch("app.services.llm.jobs.get_validators_config") as mock_fetch_configs: + with patch( + "app.services.llm.jobs.list_validators_config" + ) as mock_fetch_configs: mock_fetch_configs.return_value = ([], []) request_data = { From 43ae67743f6c797564408385c8a179461363a6c9 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 16:54:50 +0530 Subject: [PATCH 20/35] resolved comment --- backend/app/models/llm/request.py | 7 +++++-- backend/app/services/llm/guardrails.py | 19 ++++++++++++++++--- backend/app/services/llm/jobs.py | 13 +++++++++---- 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index 6b275d0b..a50777fb 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -210,17 +210,20 @@ def validate_params(self): ] +class Validator(SQLModel): + validator_config_id: UUID + class ConfigBlob(SQLModel): """Raw JSON blob of config.""" completion: CompletionConfig = Field(..., description="Completion configuration") - input_guardrails: list[UUID] | None = Field( + input_guardrails: list[Validator] | None = Field( default=None, description="Guardrails applied to validate/sanitize the input before the LLM call", ) - output_guardrails: list[UUID] | None = Field( + output_guardrails: list[Validator] | None = Field( default=None, description="Guardrails applied to validate/sanitize the output after the LLM call", ) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index b8b16338..705e0407 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -5,12 +5,15 @@ import httpx from 
app.core.config import settings +from app.models.llm.request import Validator logger = logging.getLogger(__name__) def run_guardrails_validation( - input_text: str, guardrail_config: list[dict[str, Any]], job_id: UUID + input_text: str, + guardrail_config: list[Validator | dict[str, Any]], + job_id: UUID, ) -> dict[str, Any]: """ Call the Kaapi guardrails service to validate and process input text. @@ -23,10 +26,15 @@ def run_guardrails_validation( Returns: JSON response from the guardrails service with validation results. """ + validators = [ + validator.model_dump() if isinstance(validator, Validator) else validator + for validator in guardrail_config + ] + payload = { "request_id": str(job_id), "input": input_text, - "validators": guardrail_config, + "validators": validators, } headers = { @@ -61,9 +69,9 @@ def run_guardrails_validation( def list_validators_config( - validator_config_ids: list[UUID] | None, organization_id: int | None, project_id: int | None, + validator_configs: list[Validator] | None, ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: """ Fetch validator configurations by IDs and split by stage. @@ -71,6 +79,11 @@ def list_validators_config( Calls: GET /validators/configs/?organization_id={organization_id}&project_id={project_id}&ids={uuid} """ + validator_config_ids = [ + validator_config.validator_config_id + for validator_config in (validator_configs or []) + ] + if not validator_config_ids: return [], [] diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index b61254cd..613ee2e1 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -17,6 +17,7 @@ ConfigBlob, LLMCallConfig, KaapiCompletionConfig, + Validator, ) from app.services.llm.guardrails import ( list_validators_config, @@ -210,16 +211,16 @@ def execute_job( config_blob = config.blob if config_blob is not None: - validator_config_ids: list[UUID] = [ + validator_configs: list[Validator] = [ *(config_blob.input_guardrails or []), *(config_blob.output_guardrails or []), ] - if validator_config_ids: + if validator_configs: input_guardrails, output_guardrails = list_validators_config( - validator_config_ids=validator_config_ids, organization_id=organization_id, project_id=project_id, + validator_configs=validator_configs, ) if input_guardrails: @@ -287,7 +288,11 @@ def execute_job( # Create LLM call record before execution try: # Rebuild ConfigBlob with transformed native config - resolved_config_blob = ConfigBlob(completion=completion_config) + resolved_config_blob = ConfigBlob( + completion=completion_config, + input_guardrails=config_blob.input_guardrails, + output_guardrails=config_blob.output_guardrails, + ) llm_call = create_llm_call( session, From b9e656ece9af2aea57c990dd0dab3a7a65eff361 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:00:22 +0530 Subject: [PATCH 21/35] precommit --- backend/app/models/llm/request.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index a50777fb..b90fb622 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -213,6 +213,7 @@ def validate_params(self): class Validator(SQLModel): validator_config_id: UUID + class ConfigBlob(SQLModel): """Raw JSON blob of config.""" From 48eb714ca6268c335afb02341c487d0b1fe849f5 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:22:03 +0530 Subject: [PATCH 22/35] removed redundant tests --- 
.../tests/api/routes/configs/test_config.py | 40 ++---------------- .../tests/api/routes/configs/test_version.py | 41 ------------------- 2 files changed, 3 insertions(+), 78 deletions(-) diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index 6cfc9d51..eee392e9 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -8,6 +8,9 @@ from app.tests.utils.test_data import create_test_config, create_test_project +TEST_UUID = uuid4() +TEST_UUID_STR = str(uuid4()) + def test_create_config_success( db: Session, client: TestClient, @@ -59,43 +62,6 @@ def test_create_config_success( ) -def test_create_config_with_guardrails_persists_validator_refs( - db: Session, - client: TestClient, - user_api_key: TestAuthContext, -) -> None: - config_data = { - "name": "test-llm-config-guardrails", - "description": "Config with guardrails", - "config_blob": { - "completion": { - "provider": "openai-native", - "type": "text", - "params": {"model": "gpt-4"}, - }, - "input_guardrails": [{"validator_config_id": 1}], - "output_guardrails": [{"validator_config_id": 2}], - }, - "commit_message": "Initial configuration", - } - - response = client.post( - f"{settings.API_V1_STR}/configs/", - headers={"X-API-KEY": user_api_key.key}, - json=config_data, - ) - - assert response.status_code == 201 - data = response.json() - assert data["success"] is True - assert data["data"]["version"]["config_blob"]["input_guardrails"] == [ - {"validator_config_id": 1} - ] - assert data["data"]["version"]["config_blob"]["output_guardrails"] == [ - {"validator_config_id": 2} - ] - - def test_create_config_empty_blob_fails( db: Session, client: TestClient, diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 7acba795..1a368e6c 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -89,47 +89,6 @@ def test_create_version_nonexistent_config( assert response.status_code == 404 -def test_create_version_with_guardrails_persists_validator_refs( - db: Session, - client: TestClient, - user_api_key: TestAuthContext, -) -> None: - config = create_test_config( - db=db, - project_id=user_api_key.project_id, - name="test-config-guardrails", - ) - - version_data = { - "config_blob": { - "completion": { - "provider": "openai-native", - "type": "text", - "params": {"model": "gpt-4-turbo"}, - }, - "input_guardrails": [{"validator_config_id": 1}], - "output_guardrails": [{"validator_config_id": 2}], - }, - "commit_message": "Guardrails config", - } - - response = client.post( - f"{settings.API_V1_STR}/configs/{config.id}/versions", - headers={"X-API-KEY": user_api_key.key}, - json=version_data, - ) - - assert response.status_code == 201 - data = response.json() - assert data["success"] is True - assert data["data"]["config_blob"]["input_guardrails"] == [ - {"validator_config_id": 1} - ] - assert data["data"]["config_blob"]["output_guardrails"] == [ - {"validator_config_id": 2} - ] - - def test_create_version_empty_blob_creates_noop_version( db: Session, client: TestClient, From 7d29f21616c6ae4da47af1b50bb5803f98390d73 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:23:19 +0530 Subject: [PATCH 23/35] removed tests --- .../tests/api/routes/configs/test_config.py | 3 -- .../tests/api/routes/configs/test_version.py | 28 ------------------- 2 files changed, 31 
deletions(-) diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index eee392e9..5ff36b25 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -8,9 +8,6 @@ from app.tests.utils.test_data import create_test_config, create_test_project -TEST_UUID = uuid4() -TEST_UUID_STR = str(uuid4()) - def test_create_config_success( db: Session, client: TestClient, diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 1a368e6c..b5a4ad41 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -89,34 +89,6 @@ def test_create_version_nonexistent_config( assert response.status_code == 404 -def test_create_version_empty_blob_creates_noop_version( - db: Session, - client: TestClient, - user_api_key: TestAuthContext, -) -> None: - """Empty partial update still creates a new version by inheriting previous blob.""" - config = create_test_config( - db=db, - project_id=user_api_key.project_id, - name="test-config", - ) - - version_data = { - "config_blob": {}, - "commit_message": "Empty blob", - } - - response = client.post( - f"{settings.API_V1_STR}/configs/{config.id}/versions", - headers={"X-API-KEY": user_api_key.key}, - json=version_data, - ) - assert response.status_code == 201 - data = response.json() - assert data["success"] is True - assert data["data"]["version"] == 2 - - def test_create_version_different_project_fails( db: Session, client: TestClient, From cf1dbe8a5758285ddbc9747574b6fe96191ac56d Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:24:46 +0530 Subject: [PATCH 24/35] removed tests --- backend/app/tests/crud/config/test_config.py | 27 ------------------- backend/app/tests/crud/config/test_version.py | 24 ----------------- 2 files changed, 51 deletions(-) diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py index fe0caed3..1a453f90 100644 --- a/backend/app/tests/crud/config/test_config.py +++ b/backend/app/tests/crud/config/test_config.py @@ -59,33 +59,6 @@ def test_create_config(db: Session, example_config_blob: ConfigBlob) -> None: assert version.commit_message == "Initial version" -def test_create_config_with_guardrails_persists_validator_refs( - db: Session, -) -> None: - project = create_test_project(db) - config_crud = ConfigCrud(session=db, project_id=project.id) - - config_create = ConfigCreate( - name=f"test-config-{random_lower_string()}", - description="Test configuration", - config_blob=ConfigBlob( - completion=NativeCompletionConfig( - provider="openai-native", - type="text", - params={"model": "gpt-4"}, - ), - input_guardrails=[{"validator_config_id": 1}], - output_guardrails=[{"validator_config_id": 2}], - ), - commit_message="Initial version", - ) - - _, version = config_crud.create_or_raise(config_create) - - assert version.config_blob["input_guardrails"] == [{"validator_config_id": 1}] - assert version.config_blob["output_guardrails"] == [{"validator_config_id": 2}] - - def test_create_config_duplicate_name( db: Session, example_config_blob: ConfigBlob ) -> None: diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py index dc983c47..cbdc13f8 100644 --- a/backend/app/tests/crud/config/test_version.py +++ b/backend/app/tests/crud/config/test_version.py @@ -51,30 +51,6 @@ def 
test_create_version(db: Session, example_config_blob: ConfigBlob) -> None: assert version.deleted_at is None -def test_create_version_with_guardrails_persists_validator_refs( - db: Session, -) -> None: - config = create_test_config(db) - version_crud = ConfigVersionCrud( - session=db, - project_id=config.project_id, - config_id=config.id, - ) - - version_update = ConfigVersionUpdate( - config_blob={ - "input_guardrails": [{"validator_config_id": 1}], - "output_guardrails": [{"validator_config_id": 2}], - }, - commit_message="Guardrails version", - ) - - version = version_crud.create_or_raise(version_update) - - assert version.config_blob["input_guardrails"] == [{"validator_config_id": 1}] - assert version.config_blob["output_guardrails"] == [{"validator_config_id": 2}] - - def test_create_version_auto_increment( db: Session, example_config_blob: ConfigBlob ) -> None: From dd2892b7bd1b57a1aa4a3247a5ce7f7967a0e9d3 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:25:21 +0530 Subject: [PATCH 25/35] removed tests --- backend/app/tests/crud/config/test_config.py | 1 + backend/app/tests/crud/config/test_version.py | 1 + 2 files changed, 2 insertions(+) diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py index 1a453f90..6fc9c7f1 100644 --- a/backend/app/tests/crud/config/test_config.py +++ b/backend/app/tests/crud/config/test_config.py @@ -1,4 +1,5 @@ from uuid import uuid4 + import pytest from sqlmodel import Session from fastapi import HTTPException diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py index cbdc13f8..dfbe137a 100644 --- a/backend/app/tests/crud/config/test_version.py +++ b/backend/app/tests/crud/config/test_version.py @@ -1,4 +1,5 @@ from uuid import uuid4 + import pytest from sqlmodel import Session from fastapi import HTTPException From 0272dec6c5b577cf643f86b7f87cf604a7814745 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:42:47 +0530 Subject: [PATCH 26/35] fixed tests --- backend/app/services/llm/jobs.py | 11 ++-- .../app/tests/services/llm/test_guardrails.py | 16 ++--- backend/app/tests/services/llm/test_jobs.py | 58 ++++++++++++++----- 3 files changed, 61 insertions(+), 24 deletions(-) diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index b60abcd9..3b58f918 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -17,6 +17,8 @@ ConfigBlob, LLMCallConfig, KaapiCompletionConfig, + TextContent, + TextInput, Validator, ) from app.services.llm.guardrails import ( @@ -242,18 +244,19 @@ def execute_job( ) elif safe_input["success"]: - request.query.input = safe_input["data"]["safe_text"] + request.query.input.content.value = safe_input["data"]["safe_text"] if safe_input["data"]["rephrase_needed"]: callback_response = APIResponse.failure_response( - error=request.query.input, + error=safe_input["data"]["safe_text"], metadata=request.request_metadata, ) return handle_job_error( job_id, request.callback_url, callback_response ) else: - request.query.input = safe_input["error"] + # Update the text value with error message + request.query.input.content.value = safe_input["error"] callback_response = APIResponse.failure_response( error=safe_input["error"], @@ -398,7 +401,7 @@ def execute_job( if safe_output["data"]["rephrase_needed"] == True: callback_response = APIResponse.failure_response( - error=output_text, + error=request.query.input, metadata=request.request_metadata, 
) return handle_job_error( diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 1eca1bc8..f4293386 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -4,6 +4,7 @@ import httpx from app.core.config import settings +from app.models.llm.request import Validator from app.services.llm.guardrails import ( list_validators_config, run_guardrails_validation, @@ -92,10 +93,9 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) @patch("app.services.llm.guardrails.httpx.Client") def test_list_validators_config_splits_input_output(mock_client_cls) -> None: - validator_config_ids = [ - uuid.uuid4(), - uuid.uuid4(), - uuid.uuid4(), + validator_configs = [ + Validator(validator_config_id=uuid.uuid4()), + Validator(validator_config_id=uuid.uuid4()), ] mock_response = MagicMock() @@ -118,7 +118,7 @@ def test_list_validators_config_splits_input_output(mock_client_cls) -> None: mock_client_cls.return_value.__enter__.return_value = mock_client input_guardrails, output_guardrails = list_validators_config( - validator_config_ids=validator_config_ids, + validator_configs=validator_configs, organization_id=1, project_id=1, ) @@ -128,7 +128,9 @@ def test_list_validators_config_splits_input_output(mock_client_cls) -> None: assert all(g["stage"] == "input" for g in input_guardrails) assert all(g["stage"] == "output" for g in output_guardrails) _, kwargs = mock_client.get.call_args - assert kwargs["params"]["ids"] == [str(v) for v in validator_config_ids] + assert kwargs["params"]["ids"] == [ + str(v.validator_config_id) for v in validator_configs + ] @patch("app.services.llm.guardrails.httpx.Client") @@ -136,7 +138,7 @@ def test_list_validators_config_empty_short_circuits_without_http( mock_client_cls, ) -> None: input_guardrails, output_guardrails = list_validators_config( - validator_config_ids=[], + validator_configs=[], organization_id=1, project_id=1, ) diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 7f348e47..df9094f6 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -1,5 +1,6 @@ import pytest from unittest.mock import patch, MagicMock +from uuid import UUID, uuid4 from fastapi import HTTPException from sqlmodel import Session, select @@ -30,6 +31,9 @@ from app.tests.utils.utils import get_project from app.tests.utils.test_data import create_test_config +VALIDATOR_CONFIG_ID_1 = "00000000-0000-0000-0000-000000000001" +VALIDATOR_CONFIG_ID_2 = "00000000-0000-0000-0000-000000000002" + class TestStartJob: """Test cases for the start_job function.""" @@ -215,6 +219,15 @@ def test_handle_job_error_callback_failure_still_updates_job(self, db: Session): class TestExecuteJob: """Test suite for execute_job.""" + @pytest.fixture(autouse=True) + def mock_llm_call_crud(self): + with ( + patch("app.services.llm.jobs.create_llm_call") as mock_create_llm_call, + patch("app.services.llm.jobs.update_llm_call_response"), + ): + mock_create_llm_call.return_value = MagicMock(id=uuid4()) + yield + @pytest.fixture def job_for_execution(self, db: Session): job = JobCrud(session=db).create( @@ -775,7 +788,9 @@ def test_guardrails_sanitize_input_before_provider( "type": "text", "params": {"model": "gpt-4"}, }, - "input_guardrails": [{"validator_config_id": 1}], + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], 
"output_guardrails": [], } }, @@ -825,7 +840,9 @@ def test_guardrails_sanitize_output_after_provider( "params": {"model": "gpt-4"}, }, "input_guardrails": [], - "output_guardrails": [{"validator_config_id": 2}], + "output_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_2} + ], } }, } @@ -868,7 +885,9 @@ def test_guardrails_bypass_does_not_modify_input( "type": "text", "params": {"model": "gpt-4"}, }, - "input_guardrails": [{"validator_config_id": 1}], + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], "output_guardrails": [], } }, @@ -905,7 +924,9 @@ def test_guardrails_validation_failure_blocks_job( "type": "text", "params": {"model": "gpt-4"}, }, - "input_guardrails": [{"validator_config_id": 1}], + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], "output_guardrails": [], } }, @@ -947,7 +968,9 @@ def test_guardrails_rephrase_needed_blocks_job( "type": "text", "params": {"model": "gpt-4"}, }, - "input_guardrails": [{"validator_config_id": 1}], + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], "output_guardrails": [], } }, @@ -974,10 +997,15 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( "blob": { "completion": { "provider": "openai-native", + "type": "text", "params": {"model": "gpt-4"}, }, - "input_guardrails": [{"validator_config_id": 1}], - "output_guardrails": [{"validator_config_id": 2}], + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], + "output_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_2} + ], } }, } @@ -987,7 +1015,10 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( mock_fetch_configs.assert_called_once() _, kwargs = mock_fetch_configs.call_args validator_configs = kwargs["validator_configs"] - assert [v.validator_config_id for v in validator_configs] == [1, 2] + assert [v.validator_config_id for v in validator_configs] == [ + UUID(VALIDATOR_CONFIG_ID_1), + UUID(VALIDATOR_CONFIG_ID_2), + ] class TestResolveConfigBlob: @@ -1025,16 +1056,17 @@ def test_resolve_config_blob_keeps_validator_refs(self, db: Session): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ), - input_guardrails=[{"validator_config_id": 1}], - output_guardrails=[{"validator_config_id": 2}], + input_guardrails=[{"validator_config_id": VALIDATOR_CONFIG_ID_1}], + output_guardrails=[{"validator_config_id": VALIDATOR_CONFIG_ID_2}], ) config = create_test_config(db, project_id=project.id, config_blob=config_blob) db.commit() config_crud = ConfigVersionCrud( - session=db, project_id=project.id, config_id=config.id, organization_id=1 + session=db, project_id=project.id, config_id=config.id ) llm_call_config = LLMCallConfig(id=str(config.id), version=1) @@ -1043,10 +1075,10 @@ def test_resolve_config_blob_keeps_validator_refs(self, db: Session): assert error is None assert resolved_blob is not None assert [v.model_dump() for v in (resolved_blob.input_guardrails or [])] == [ - {"validator_config_id": 1} + {"validator_config_id": UUID(VALIDATOR_CONFIG_ID_1)} ] assert [v.model_dump() for v in (resolved_blob.output_guardrails or [])] == [ - {"validator_config_id": 2} + {"validator_config_id": UUID(VALIDATOR_CONFIG_ID_2)} ] def test_resolve_config_blob_version_not_found(self, db: Session): From 7fe03e6358c5f46f226ea92dc3cd15ad59b7f81d Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Mon, 16 Feb 2026 17:56:16 +0530 Subject: [PATCH 27/35] fixed tests --- 
 backend/app/crud/config/config.py                 | 2 +-
 backend/app/crud/config/version.py                | 2 +-
 backend/app/services/llm/jobs.py                  | 6 ------
 backend/app/tests/services/llm/test_guardrails.py | 5 +++--
 4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/backend/app/crud/config/config.py b/backend/app/crud/config/config.py
index 69d4bced..0a2ed213 100644
--- a/backend/app/crud/config/config.py
+++ b/backend/app/crud/config/config.py
@@ -47,7 +47,7 @@ def create_or_raise(
         version = ConfigVersion(
             config_id=config.id,
             version=1,
-            config_blob=config_create.config_blob.model_dump(),
+            config_blob=config_create.config_blob.model_dump(mode="json"),
             commit_message=config_create.commit_message,
         )

diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py
index 915d1b18..b3da74f1 100644
--- a/backend/app/crud/config/version.py
+++ b/backend/app/crud/config/version.py
@@ -79,7 +79,7 @@ def create_or_raise(self, version_create: ConfigVersionUpdate) -> ConfigVersion:
         version = ConfigVersion(
             config_id=self.config_id,
             version=next_version,
-            config_blob=validated_blob.model_dump(),
+            config_blob=validated_blob.model_dump(mode="json"),
             commit_message=version_create.commit_message,
         )

diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py
index 3b58f918..e81ef651 100644
--- a/backend/app/services/llm/jobs.py
+++ b/backend/app/services/llm/jobs.py
@@ -17,18 +17,12 @@
     ConfigBlob,
     LLMCallConfig,
     KaapiCompletionConfig,
-    TextContent,
-    TextInput,
     Validator,
 )
 from app.services.llm.guardrails import (
     list_validators_config,
     run_guardrails_validation,
 )
-from app.services.llm.guardrails import (
-    list_validators_config,
-    run_guardrails_validation,
-)
 from app.services.llm.providers.registry import get_llm_provider
 from app.services.llm.mappers import transform_kaapi_config_to_native
 from app.services.llm.input_resolver import resolve_input, cleanup_temp_file
diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py
index f4293386..1e1f1f4c 100644
--- a/backend/app/tests/services/llm/test_guardrails.py
+++ b/backend/app/tests/services/llm/test_guardrails.py
@@ -85,10 +85,11 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls)
     mock_client.post.return_value = mock_response
     mock_client_cls.return_value.__enter__.return_value = mock_client

-    run_guardrails_validation(TEST_TEXT, [{"validator_config_id": 123}], TEST_JOB_ID)
+    vid = uuid.uuid4()
+    run_guardrails_validation(TEST_TEXT, [Validator(validator_config_id=vid)], TEST_JOB_ID)

     _, kwargs = mock_client.post.call_args
-    assert kwargs["json"]["validators"] == [{"validator_config_id": 123}]
+    assert kwargs["json"]["validators"] == [{"validator_config_id": str(vid)}]


 @patch("app.services.llm.guardrails.httpx.Client")

From 10ea155fc758d2df09b5d92b12f75000244b6f25 Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Mon, 16 Feb 2026 17:58:57 +0530
Subject: [PATCH 28/35] precommit

---
 backend/app/tests/services/llm/test_guardrails.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py
index 1e1f1f4c..3a7fd485 100644
--- a/backend/app/tests/services/llm/test_guardrails.py
+++ b/backend/app/tests/services/llm/test_guardrails.py
@@ -86,7 +86,9 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls)
     mock_client.post.return_value = mock_response
     mock_client_cls.return_value.__enter__.return_value = mock_client

     vid = uuid.uuid4()
-    run_guardrails_validation(TEST_TEXT, [Validator(validator_config_id=vid)], TEST_JOB_ID)
+    run_guardrails_validation(
+        TEST_TEXT, [Validator(validator_config_id=vid)], TEST_JOB_ID
+    )

     _, kwargs = mock_client.post.call_args
     assert kwargs["json"]["validators"] == [{"validator_config_id": str(vid)}]

From ce03f18bc04c65b1298eb9644b009af93c62d27b Mon Sep 17 00:00:00 2001
From: rkritika1508
Date: Tue, 17 Feb 2026 03:56:09 +0530
Subject: [PATCH 29/35] updated guardrails

---
 backend/app/services/llm/guardrails.py            | 11 +++-
 backend/app/services/llm/jobs.py                  | 14 ++++-
 .../app/tests/services/llm/test_guardrails.py     | 60 +++++++++++++++++--
 3 files changed, 78 insertions(+), 7 deletions(-)

diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py
index 705e0407..412d8b61 100644
--- a/backend/app/services/llm/guardrails.py
+++ b/backend/app/services/llm/guardrails.py
@@ -14,6 +14,9 @@ def run_guardrails_validation(
     input_text: str,
     guardrail_config: list[Validator | dict[str, Any]],
     job_id: UUID,
+    project_id: int | None,
+    organization_id: int | None,
+    suppress_pass_logs: bool = True,
 ) -> dict[str, Any]:
     """
     Call the Kaapi guardrails service to validate and process input text.
@@ -22,6 +25,9 @@
         input_text: Text to validate and process.
         guardrail_config: List of validator configurations to apply.
         job_id: Unique identifier for the request.
+        project_id: Project identifier expected by the guardrails API.
+        organization_id: Organization identifier expected by the guardrails API.
+        suppress_pass_logs: Whether to suppress successful validation logs in the guardrails service.

     Returns:
         JSON response from the guardrails service with validation results.
@@ -33,6 +39,8 @@

     payload = {
         "request_id": str(job_id),
+        "project_id": project_id,
+        "organization_id": organization_id,
         "input": input_text,
         "validators": validators,
     }
@@ -46,8 +54,9 @@
     try:
         with httpx.Client(timeout=10.0) as client:
             response = client.post(
-                settings.KAAPI_GUARDRAILS_URL,
+                f"{settings.KAAPI_GUARDRAILS_URL}/",
                 json=payload,
+                params={"suppress_pass_logs": str(suppress_pass_logs).lower()},
                 headers=headers,
             )

diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py
index e81ef651..26100c53 100644
--- a/backend/app/services/llm/jobs.py
+++ b/backend/app/services/llm/jobs.py
@@ -225,7 +225,12 @@ def execute_job(

         if input_guardrails:
             safe_input = run_guardrails_validation(
-                request.query.input, input_guardrails, job_id
+                request.query.input.content.value,
+                input_guardrails,
+                job_id,
+                project_id,
+                organization_id,
+                suppress_pass_logs=True,
             )

             logger.info(
@@ -376,7 +381,12 @@
         if output_guardrails:
             output_text = response.response.output.content.value
             safe_output = run_guardrails_validation(
-                output_text, output_guardrails, job_id
+                output_text,
+                output_guardrails,
+                job_id,
+                project_id,
+                organization_id,
+                suppress_pass_logs=True,
             )

             logger.info(
diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py
index 3a7fd485..3aa828d9 100644
--- a/backend/app/tests/services/llm/test_guardrails.py
+++ b/backend/app/tests/services/llm/test_guardrails.py
@@ -14,6 +14,8 @@
 TEST_JOB_ID = uuid.uuid4()
 TEST_TEXT = "hello world"
 TEST_CONFIG = [{"type": "pii_remover"}]
+TEST_PROJECT_ID = 1
+TEST_ORGANIZATION_ID = 1


 @patch("app.services.llm.guardrails.httpx.Client")
@@ -26,7 +28,13 @@ def test_run_guardrails_validation_success(mock_client_cls)
-> None: mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + result = run_guardrails_validation( + TEST_TEXT, + TEST_CONFIG, + TEST_JOB_ID, + TEST_PROJECT_ID, + TEST_ORGANIZATION_ID, + ) assert result == {"success": True} mock_client.post.assert_called_once() @@ -35,6 +43,9 @@ def test_run_guardrails_validation_success(mock_client_cls) -> None: assert kwargs["json"]["input"] == TEST_TEXT assert kwargs["json"]["validators"] == TEST_CONFIG assert kwargs["json"]["request_id"] == str(TEST_JOB_ID) + assert kwargs["json"]["project_id"] == TEST_PROJECT_ID + assert kwargs["json"]["organization_id"] == TEST_ORGANIZATION_ID + assert kwargs["params"]["suppress_pass_logs"] == "true" assert kwargs["headers"]["Authorization"].startswith("Bearer ") assert kwargs["headers"]["Content-Type"] == "application/json" @@ -50,7 +61,13 @@ def test_run_guardrails_validation_http_error_bypasses(mock_client_cls) -> None: mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - result = run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + result = run_guardrails_validation( + TEST_TEXT, + TEST_CONFIG, + TEST_JOB_ID, + TEST_PROJECT_ID, + TEST_ORGANIZATION_ID, + ) assert result["success"] is False assert result["bypassed"] is True @@ -67,7 +84,13 @@ def test_run_guardrails_validation_uses_settings(mock_client_cls) -> None: mock_client.post.return_value = mock_response mock_client_cls.return_value.__enter__.return_value = mock_client - run_guardrails_validation(TEST_TEXT, TEST_CONFIG, TEST_JOB_ID) + run_guardrails_validation( + TEST_TEXT, + TEST_CONFIG, + TEST_JOB_ID, + TEST_PROJECT_ID, + TEST_ORGANIZATION_ID, + ) _, kwargs = mock_client.post.call_args assert ( @@ -87,13 +110,42 @@ def test_run_guardrails_validation_serializes_validator_models(mock_client_cls) vid = uuid.uuid4() run_guardrails_validation( - TEST_TEXT, [Validator(validator_config_id=vid)], TEST_JOB_ID + TEST_TEXT, + [Validator(validator_config_id=vid)], + TEST_JOB_ID, + TEST_PROJECT_ID, + TEST_ORGANIZATION_ID, ) _, kwargs = mock_client.post.call_args assert kwargs["json"]["validators"] == [{"validator_config_id": str(vid)}] +@patch("app.services.llm.guardrails.httpx.Client") +def test_run_guardrails_validation_allows_disable_suppress_pass_logs( + mock_client_cls, +) -> None: + mock_response = MagicMock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"success": True} + + mock_client = MagicMock() + mock_client.post.return_value = mock_response + mock_client_cls.return_value.__enter__.return_value = mock_client + + run_guardrails_validation( + TEST_TEXT, + TEST_CONFIG, + TEST_JOB_ID, + TEST_PROJECT_ID, + TEST_ORGANIZATION_ID, + suppress_pass_logs=False, + ) + + _, kwargs = mock_client.post.call_args + assert kwargs["params"]["suppress_pass_logs"] == "false" + + @patch("app.services.llm.guardrails.httpx.Client") def test_list_validators_config_splits_input_output(mock_client_cls) -> None: validator_configs = [ From 229fba0aab7ebfe849b84f7ca595944beda39080 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 04:14:45 +0530 Subject: [PATCH 30/35] resolved comment --- backend/app/services/llm/guardrails.py | 19 +++++----- backend/app/services/llm/jobs.py | 19 +++++++--- .../app/tests/services/llm/test_guardrails.py | 26 ++++++++++++++ backend/app/tests/services/llm/test_jobs.py | 36 +++++++++++++++++++ 
4 files changed, 86 insertions(+), 14 deletions(-) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 412d8b61..da83be02 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -33,7 +33,9 @@ def run_guardrails_validation( JSON response from the guardrails service with validation results. """ validators = [ - validator.model_dump() if isinstance(validator, Validator) else validator + validator.model_dump(mode="json") + if isinstance(validator, Validator) + else validator for validator in guardrail_config ] @@ -103,19 +105,18 @@ def list_validators_config( } endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/" + params = { + "organization_id": organization_id, + "project_id": project_id, + "ids": [str(validator_config_id) for validator_config_id in validator_config_ids], + } + params = {key: value for key, value in params.items() if value is not None} try: with httpx.Client(timeout=10.0) as client: response = client.get( endpoint, - params={ - "organization_id": organization_id, - "project_id": project_id, - "ids": [ - str(validator_config_id) - for validator_config_id in validator_config_ids - ], - }, + params=params, headers=headers, ) response.raise_for_status() diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index 26100c53..c3e5e314 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -217,11 +217,20 @@ def execute_job( ] if validator_configs: - input_guardrails, output_guardrails = list_validators_config( - organization_id=organization_id, - project_id=project_id, - validator_configs=validator_configs, - ) + try: + input_guardrails, output_guardrails = list_validators_config( + organization_id=organization_id, + project_id=project_id, + validator_configs=validator_configs, + ) + except Exception as e: + logger.error( + "[execute_job] Failed to fetch guardrail validator configs. " + "Proceeding without input/output guardrails for this job. 
" + f"job_id={job_id}, error={e}", + exc_info=True, + ) + input_guardrails, output_guardrails = [], [] if input_guardrails: safe_input = run_guardrails_validation( diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 3aa828d9..50bf2eee 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -201,3 +201,29 @@ def test_list_validators_config_empty_short_circuits_without_http( assert input_guardrails == [] assert output_guardrails == [] mock_client_cls.assert_not_called() + + +@patch("app.services.llm.guardrails.httpx.Client") +def test_list_validators_config_omits_none_query_params(mock_client_cls) -> None: + validator_configs = [Validator(validator_config_id=uuid.uuid4())] + + mock_response = MagicMock() + mock_response.raise_for_status.return_value = None + mock_response.json.return_value = {"success": True, "data": []} + + mock_client = MagicMock() + mock_client.get.return_value = mock_response + mock_client_cls.return_value.__enter__.return_value = mock_client + + list_validators_config( + validator_configs=validator_configs, + organization_id=None, + project_id=None, + ) + + _, kwargs = mock_client.get.call_args + assert kwargs["params"]["ids"] == [ + str(v.validator_config_id) for v in validator_configs + ] + assert "organization_id" not in kwargs["params"] + assert "project_id" not in kwargs["params"] diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index df9094f6..65e3fbde 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -1020,6 +1020,42 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( UUID(VALIDATOR_CONFIG_ID_2), ] + def test_execute_job_continues_when_validator_config_fetch_fails( + self, db, job_env, job_for_execution + ): + env = job_env + env["provider"].execute.return_value = (env["mock_llm_response"], None) + + with ( + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, + patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, + ): + mock_fetch_configs.side_effect = Exception("validator service unavailable") + + request_data = { + "query": {"input": "hello"}, + "config": { + "blob": { + "completion": { + "provider": "openai-native", + "type": "text", + "params": {"model": "gpt-4"}, + }, + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], + "output_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_2} + ], + } + }, + } + result = self._execute_job(job_for_execution, db, request_data) + + assert result["success"] is True + env["provider"].execute.assert_called_once() + mock_guardrails.assert_not_called() + class TestResolveConfigBlob: """Test suite for resolve_config_blob function.""" From 6be19232635fc91ab39730a304e8899aec750265 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 04:21:46 +0530 Subject: [PATCH 31/35] precommit --- backend/app/services/llm/guardrails.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index da83be02..07919a11 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -108,7 +108,9 @@ def list_validators_config( params = { "organization_id": organization_id, "project_id": project_id, - "ids": [str(validator_config_id) for validator_config_id in 
validator_config_ids], + "ids": [ + str(validator_config_id) for validator_config_id in validator_config_ids + ], } params = {key: value for key, value in params.items() if value is not None} From 069993f0bc6da773333cf7702d775bef93b10cd1 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 17:37:26 +0530 Subject: [PATCH 32/35] added verify api --- backend/app/api/docs/api_keys/verify.md | 3 + backend/app/api/routes/api_keys.py | 25 +++++- backend/app/models/__init__.py | 8 +- backend/app/models/api_key.py | 8 ++ backend/app/tests/api/routes/test_api_key.py | 87 ++++++++++++++++++++ 5 files changed, 129 insertions(+), 2 deletions(-) create mode 100644 backend/app/api/docs/api_keys/verify.md diff --git a/backend/app/api/docs/api_keys/verify.md b/backend/app/api/docs/api_keys/verify.md new file mode 100644 index 00000000..2b3886d1 --- /dev/null +++ b/backend/app/api/docs/api_keys/verify.md @@ -0,0 +1,3 @@ +Verify the provided API key and return the resolved auth context. + +This endpoint validates the `X-API-KEY` header and returns `user_id`, `organization_id`, and `project_id` for the authenticated key. diff --git a/backend/app/api/routes/api_keys.py b/backend/app/api/routes/api_keys.py index 723eecc8..95e660f1 100644 --- a/backend/app/api/routes/api_keys.py +++ b/backend/app/api/routes/api_keys.py @@ -3,7 +3,12 @@ from app.api.deps import SessionDep, AuthContextDep from app.crud.api_key import APIKeyCrud -from app.models import APIKeyPublic, APIKeyCreateResponse, Message +from app.models import ( + APIKeyPublic, + APIKeyCreateResponse, + APIKeyVerifyResponse, + Message, +) from app.utils import APIResponse, load_description from app.api.permissions import Permission, require_permission @@ -71,3 +76,21 @@ def delete_api_key_route( api_key_crud.delete(key_id=key_id) return APIResponse.success_response(Message(message="API Key deleted successfully")) + + +@router.get( + "/verify", + response_model=APIResponse[APIKeyVerifyResponse], + dependencies=[Depends(require_permission(Permission.REQUIRE_PROJECT))], + description=load_description("api_keys/verify.md"), +) +def verify_api_key_route( + current_user: AuthContextDep, +): + return APIResponse.success_response( + APIKeyVerifyResponse( + user_id=current_user.user.id, + organization_id=current_user.organization_.id, + project_id=current_user.project_.id, + ) + ) diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index a0149a82..2c28d7b4 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -2,7 +2,13 @@ from .auth import AuthContext, Token, TokenPayload -from .api_key import APIKey, APIKeyBase, APIKeyPublic, APIKeyCreateResponse +from .api_key import ( + APIKey, + APIKeyBase, + APIKeyPublic, + APIKeyCreateResponse, + APIKeyVerifyResponse, +) from .assistants import Assistant, AssistantBase, AssistantCreate, AssistantUpdate diff --git a/backend/app/models/api_key.py b/backend/app/models/api_key.py index 516073f2..e8bd6c1b 100644 --- a/backend/app/models/api_key.py +++ b/backend/app/models/api_key.py @@ -45,6 +45,14 @@ class APIKeyCreateResponse(APIKeyPublic): key: str +class APIKeyVerifyResponse(SQLModel): + """Response model for API key verification.""" + + user_id: int + organization_id: int + project_id: int + + class APIKey(APIKeyBase, table=True): """Database model for API keys.""" diff --git a/backend/app/tests/api/routes/test_api_key.py b/backend/app/tests/api/routes/test_api_key.py index ee3231c0..dfcffa92 100644 --- a/backend/app/tests/api/routes/test_api_key.py +++ 
b/backend/app/tests/api/routes/test_api_key.py @@ -4,6 +4,7 @@ from sqlmodel import Session from app.core.config import settings +from app.models import Organization, Project, User from app.tests.utils.auth import TestAuthContext from app.tests.utils.test_data import create_test_api_key, create_test_project from app.tests.utils.user import create_random_user @@ -112,3 +113,89 @@ def test_delete_api_key_nonexistent( headers={"X-API-KEY": user_api_key.key}, ) assert response.status_code == 404 + + +def test_verify_api_key( + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test API key verification endpoint with a valid API key.""" + response = client.get( + f"{settings.API_V1_STR}/apikeys/verify", + headers={"X-API-KEY": user_api_key.key}, + ) + assert response.status_code == 200 + payload = response.json() + assert payload["success"] is True + assert payload["data"]["user_id"] == user_api_key.user_id + assert payload["data"]["organization_id"] == user_api_key.organization_id + assert payload["data"]["project_id"] == user_api_key.project_id + + +def test_verify_api_key_invalid_key(client: TestClient) -> None: + """Test API key verification endpoint with an invalid API key.""" + response = client.get( + f"{settings.API_V1_STR}/apikeys/verify", + headers={"X-API-KEY": "ApiKey InvalidKeyThatDoesNotExist123456789"}, + ) + assert response.status_code == 401 + + +def test_verify_api_key_missing_auth(client: TestClient) -> None: + """Test API key verification endpoint without any authentication.""" + response = client.get(f"{settings.API_V1_STR}/apikeys/verify") + assert response.status_code == 401 + + +def test_verify_api_key_inactive_user( + db: Session, + client: TestClient, +) -> None: + """Test API key verification fails when the user is inactive.""" + api_key = create_test_api_key(db) + user = db.get(User, api_key.user_id) + user.is_active = False + db.add(user) + db.commit() + + response = client.get( + f"{settings.API_V1_STR}/apikeys/verify", + headers={"X-API-KEY": api_key.key}, + ) + assert response.status_code == 403 + + +def test_verify_api_key_inactive_organization( + db: Session, + client: TestClient, +) -> None: + """Test API key verification fails when the organization is inactive.""" + api_key = create_test_api_key(db) + organization = db.get(Organization, api_key.organization_id) + organization.is_active = False + db.add(organization) + db.commit() + + response = client.get( + f"{settings.API_V1_STR}/apikeys/verify", + headers={"X-API-KEY": api_key.key}, + ) + assert response.status_code == 403 + + +def test_verify_api_key_inactive_project( + db: Session, + client: TestClient, +) -> None: + """Test API key verification fails when the project is inactive.""" + api_key = create_test_api_key(db) + project = db.get(Project, api_key.project_id) + project.is_active = False + db.add(project) + db.commit() + + response = client.get( + f"{settings.API_V1_STR}/apikeys/verify", + headers={"X-API-KEY": api_key.key}, + ) + assert response.status_code == 403 From 99f81f3a18fa58fcba64c23d0bd48a685f390f65 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 21:43:45 +0530 Subject: [PATCH 33/35] resolved comments --- backend/app/services/llm/guardrails.py | 97 +++++------ backend/app/services/llm/jobs.py | 157 +++++++++--------- .../app/tests/services/llm/test_guardrails.py | 82 +++++---- backend/app/tests/services/llm/test_jobs.py | 116 ++++++++++++- 4 files changed, 283 insertions(+), 169 deletions(-) diff --git a/backend/app/services/llm/guardrails.py 
b/backend/app/services/llm/guardrails.py index 07919a11..3adec3e2 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -82,20 +82,25 @@ def run_guardrails_validation( def list_validators_config( organization_id: int | None, project_id: int | None, - validator_configs: list[Validator] | None, + input_validator_configs: list[Validator] | None, + output_validator_configs: list[Validator] | None, ) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]: """ - Fetch validator configurations by IDs and split by stage. + Fetch validator configurations by IDs for input and output guardrails. Calls: GET /validators/configs/?organization_id={organization_id}&project_id={project_id}&ids={uuid} """ - validator_config_ids = [ + input_validator_config_ids = [ validator_config.validator_config_id - for validator_config in (validator_configs or []) + for validator_config in (input_validator_configs or []) + ] + output_validator_config_ids = [ + validator_config.validator_config_id + for validator_config in (output_validator_configs or []) ] - if not validator_config_ids: + if not input_validator_config_ids and not output_validator_config_ids: return [], [] headers = { @@ -105,58 +110,54 @@ def list_validators_config( } endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/" - params = { - "organization_id": organization_id, - "project_id": project_id, - "ids": [ - str(validator_config_id) for validator_config_id in validator_config_ids - ], - } - params = {key: value for key, value in params.items() if value is not None} + def _build_params(validator_ids: list[UUID]) -> dict[str, Any]: + params = { + "organization_id": organization_id, + "project_id": project_id, + "ids": [str(validator_config_id) for validator_config_id in validator_ids], + } + return {key: value for key, value in params.items() if value is not None} try: with httpx.Client(timeout=10.0) as client: - response = client.get( - endpoint, - params=params, - headers=headers, - ) - response.raise_for_status() - - payload = response.json() - if not isinstance(payload, dict): - raise ValueError( - "Invalid validators response format: expected JSON object." + def _fetch_by_ids(validator_ids: list[UUID]) -> list[dict[str, Any]]: + if not validator_ids: + return [] + + response = client.get( + endpoint, + params=_build_params(validator_ids), + headers=headers, ) + response.raise_for_status() - if not payload.get("success", False): - raise ValueError("Validator config fetch failed: `success` is false.") + payload = response.json() + if not isinstance(payload, dict): + raise ValueError( + "Invalid validators response format: expected JSON object." + ) - validators = payload.get("data", []) - if not isinstance(validators, list): - raise ValueError( - "Invalid validators response format: `data` must be a list." - ) + if not payload.get("success", False): + raise ValueError("Validator config fetch failed: `success` is false.") - input_guardrails = [ - validator - for validator in validators - if isinstance(validator, dict) - and str(validator.get("stage", "")).lower() == "input" - ] - output_guardrails = [ - validator - for validator in validators - if isinstance(validator, dict) - and str(validator.get("stage", "")).lower() == "output" - ] + validators = payload.get("data", []) + if not isinstance(validators, list): + raise ValueError( + "Invalid validators response format: `data` must be a list." 
+ ) + return [validator for validator in validators if isinstance(validator, dict)] + + input_guardrails = _fetch_by_ids(input_validator_config_ids) + output_guardrails = _fetch_by_ids(output_validator_config_ids) return input_guardrails, output_guardrails except Exception as e: - logger.error( - "[list_validators_config] Failed to fetch validator config. " - f"validator_config_ids={validator_config_ids}, organization_id={organization_id}, project_id={project_id}, " - f"endpoint={endpoint}, error={e}" + logger.warning( + "[list_validators_config] Guardrails service unavailable or invalid response. " + "Proceeding without input/output guardrails. " + f"input_validator_config_ids={input_validator_config_ids}, output_validator_config_ids={output_validator_config_ids}, " + f"organization_id={organization_id}, " + f"project_id={project_id}, endpoint={endpoint}, error={e}" ) - raise + return [], [] diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index c3e5e314..c6997a08 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -17,8 +17,9 @@ ConfigBlob, LLMCallConfig, KaapiCompletionConfig, - Validator, + TextInput, ) +from app.models.llm.response import TextOutput from app.services.llm.guardrails import ( list_validators_config, run_guardrails_validation, @@ -211,68 +212,54 @@ def execute_job( config_blob = config.blob if config_blob is not None: - validator_configs: list[Validator] = [ - *(config_blob.input_guardrails or []), - *(config_blob.output_guardrails or []), - ] - - if validator_configs: - try: - input_guardrails, output_guardrails = list_validators_config( - organization_id=organization_id, - project_id=project_id, - validator_configs=validator_configs, - ) - except Exception as e: - logger.error( - "[execute_job] Failed to fetch guardrail validator configs. " - "Proceeding without input/output guardrails for this job. " - f"job_id={job_id}, error={e}", - exc_info=True, - ) - input_guardrails, output_guardrails = [], [] + if config_blob.input_guardrails or config_blob.output_guardrails: + input_guardrails, output_guardrails = list_validators_config( + organization_id=organization_id, + project_id=project_id, + input_validator_configs=config_blob.input_guardrails, + output_validator_configs=config_blob.output_guardrails, + ) if input_guardrails: - safe_input = run_guardrails_validation( - request.query.input.content.value, - input_guardrails, - job_id, - project_id, - organization_id, - suppress_pass_logs=True, - ) - - logger.info( - f"[execute_job] Input guardrail validation | success={safe_input['success']}." - ) + if not isinstance(request.query.input, TextInput): + logger.info( + "[execute_job] Skipping input guardrails for non-text input. " + f"job_id={job_id}, input_type={getattr(request.query.input, 'type', type(request.query.input).__name__)}" + ) + else: + safe_input = run_guardrails_validation( + request.query.input.content.value, + input_guardrails, + job_id, + project_id, + organization_id, + suppress_pass_logs=True, + ) - if safe_input.get("bypassed"): logger.info( - "[execute_job] Guardrails bypassed (service unavailable)" + f"[execute_job] Input guardrail validation | success={safe_input['success']}." 
) - elif safe_input["success"]: - request.query.input.content.value = safe_input["data"]["safe_text"] + if safe_input.get("bypassed"): + logger.info( + "[execute_job] Guardrails bypassed (service unavailable)" + ) + + elif safe_input["success"]: + request.query.input.content.value = safe_input["data"][ + "safe_text" + ] + else: + # Update the text value with error message + request.query.input.content.value = safe_input["error"] - if safe_input["data"]["rephrase_needed"]: callback_response = APIResponse.failure_response( - error=safe_input["data"]["safe_text"], + error=safe_input["error"], metadata=request.request_metadata, ) return handle_job_error( job_id, request.callback_url, callback_response ) - else: - # Update the text value with error message - request.query.input.content.value = safe_input["error"] - - callback_response = APIResponse.failure_response( - error=safe_input["error"], - metadata=request.request_metadata, - ) - return handle_job_error( - job_id, request.callback_url, callback_response - ) user_sent_config_provider = "" try: @@ -388,50 +375,56 @@ def execute_job( if response: if output_guardrails: - output_text = response.response.output.content.value - safe_output = run_guardrails_validation( - output_text, - output_guardrails, - job_id, - project_id, - organization_id, - suppress_pass_logs=True, - ) - - logger.info( - f"[execute_job] Output guardrail validation | success={safe_output['success']}." - ) + if not isinstance(response.response.output, TextOutput): + logger.info( + "[execute_job] Skipping output guardrails for non-text output. " + f"job_id={job_id}, output_type={getattr(response.response.output, 'type', type(response.response.output).__name__)}" + ) + else: + output_text = response.response.output.content.value + safe_output = run_guardrails_validation( + output_text, + output_guardrails, + job_id, + project_id, + organization_id, + suppress_pass_logs=True, + ) - if safe_output.get("bypassed"): logger.info( - "[execute_job] Guardrails bypassed (service unavailable)" + f"[execute_job] Output guardrail validation | success={safe_output['success']}." 
) - elif safe_output["success"]: - response.response.output.content.value = safe_output["data"][ - "safe_text" - ] + if safe_output.get("bypassed"): + logger.info( + "[execute_job] Guardrails bypassed (service unavailable)" + ) + + elif safe_output["success"]: + response.response.output.content.value = safe_output["data"][ + "safe_text" + ] + + if safe_output["data"]["rephrase_needed"] == True: + callback_response = APIResponse.failure_response( + error=request.query.input, + metadata=request.request_metadata, + ) + return handle_job_error( + job_id, request.callback_url, callback_response + ) + + else: + response.response.output.content.value = safe_output["error"] - if safe_output["data"]["rephrase_needed"] == True: callback_response = APIResponse.failure_response( - error=request.query.input, + error=safe_output["error"], metadata=request.request_metadata, ) return handle_job_error( job_id, request.callback_url, callback_response ) - else: - response.response.output.content.value = safe_output["error"] - - callback_response = APIResponse.failure_response( - error=safe_output["error"], - metadata=request.request_metadata, - ) - return handle_job_error( - job_id, request.callback_url, callback_response - ) - callback_response = APIResponse.success_response( data=response, metadata=request.request_metadata ) diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 50bf2eee..4cb0544f 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -147,44 +147,45 @@ def test_run_guardrails_validation_allows_disable_suppress_pass_logs( @patch("app.services.llm.guardrails.httpx.Client") -def test_list_validators_config_splits_input_output(mock_client_cls) -> None: - validator_configs = [ - Validator(validator_config_id=uuid.uuid4()), - Validator(validator_config_id=uuid.uuid4()), - ] +def test_list_validators_config_fetches_input_and_output_by_refs(mock_client_cls) -> None: + input_validator_configs = [Validator(validator_config_id=uuid.uuid4())] + output_validator_configs = [Validator(validator_config_id=uuid.uuid4())] - mock_response = MagicMock() - mock_response.raise_for_status.return_value = None - mock_response.json.return_value = { + input_response = MagicMock() + input_response.raise_for_status.return_value = None + input_response.json.return_value = { "success": True, - "data": [ - {"type": "gender_assumption_bias", "stage": "output"}, - { - "type": "uli_slur_match", - "stage": "input", - "config": {"severity": "high"}, - }, - {"type": "pii_remover", "stage": "input"}, - ], + "data": [{"type": "uli_slur_match", "config": {"severity": "high"}}], + } + output_response = MagicMock() + output_response.raise_for_status.return_value = None + output_response.json.return_value = { + "success": True, + "data": [{"type": "gender_assumption_bias"}], } mock_client = MagicMock() - mock_client.get.return_value = mock_response + mock_client.get.side_effect = [input_response, output_response] mock_client_cls.return_value.__enter__.return_value = mock_client input_guardrails, output_guardrails = list_validators_config( - validator_configs=validator_configs, + input_validator_configs=input_validator_configs, + output_validator_configs=output_validator_configs, organization_id=1, project_id=1, ) - assert len(input_guardrails) == 2 - assert len(output_guardrails) == 1 - assert all(g["stage"] == "input" for g in input_guardrails) - assert all(g["stage"] == "output" for g in 
output_guardrails) - _, kwargs = mock_client.get.call_args - assert kwargs["params"]["ids"] == [ - str(v.validator_config_id) for v in validator_configs + assert input_guardrails == [{"type": "uli_slur_match", "config": {"severity": "high"}}] + assert output_guardrails == [{"type": "gender_assumption_bias"}] + assert mock_client.get.call_count == 2 + + first_call_kwargs = mock_client.get.call_args_list[0].kwargs + second_call_kwargs = mock_client.get.call_args_list[1].kwargs + assert first_call_kwargs["params"]["ids"] == [ + str(v.validator_config_id) for v in input_validator_configs + ] + assert second_call_kwargs["params"]["ids"] == [ + str(v.validator_config_id) for v in output_validator_configs ] @@ -193,7 +194,8 @@ def test_list_validators_config_empty_short_circuits_without_http( mock_client_cls, ) -> None: input_guardrails, output_guardrails = list_validators_config( - validator_configs=[], + input_validator_configs=[], + output_validator_configs=[], organization_id=1, project_id=1, ) @@ -205,7 +207,7 @@ def test_list_validators_config_empty_short_circuits_without_http( @patch("app.services.llm.guardrails.httpx.Client") def test_list_validators_config_omits_none_query_params(mock_client_cls) -> None: - validator_configs = [Validator(validator_config_id=uuid.uuid4())] + input_validator_configs = [Validator(validator_config_id=uuid.uuid4())] mock_response = MagicMock() mock_response.raise_for_status.return_value = None @@ -216,14 +218,34 @@ def test_list_validators_config_omits_none_query_params(mock_client_cls) -> None mock_client_cls.return_value.__enter__.return_value = mock_client list_validators_config( - validator_configs=validator_configs, + input_validator_configs=input_validator_configs, + output_validator_configs=[], organization_id=None, project_id=None, ) _, kwargs = mock_client.get.call_args assert kwargs["params"]["ids"] == [ - str(v.validator_config_id) for v in validator_configs + str(v.validator_config_id) for v in input_validator_configs ] assert "organization_id" not in kwargs["params"] assert "project_id" not in kwargs["params"] + + +@patch("app.services.llm.guardrails.httpx.Client") +def test_list_validators_config_network_error_fails_open(mock_client_cls) -> None: + input_validator_configs = [Validator(validator_config_id=uuid.uuid4())] + + mock_client = MagicMock() + mock_client.get.side_effect = httpx.ConnectError("Network is unreachable") + mock_client_cls.return_value.__enter__.return_value = mock_client + + input_guardrails, output_guardrails = list_validators_config( + input_validator_configs=input_validator_configs, + output_validator_configs=[], + organization_id=1, + project_id=1, + ) + + assert input_guardrails == [] + assert output_guardrails == [] diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 65e3fbde..0fe0459c 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -18,6 +18,8 @@ Usage, TextOutput, TextContent, + AudioOutput, + AudioContent, # KaapiLLMParams, KaapiCompletionConfig, ) @@ -805,6 +807,52 @@ def test_guardrails_sanitize_input_before_provider( assert result["success"] + def test_guardrails_skip_input_validation_for_audio_input( + self, db, job_env, job_for_execution + ): + env = job_env + env["provider"].execute.return_value = (env["mock_llm_response"], None) + + with ( + patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, + patch("app.services.llm.jobs.list_validators_config") as 
mock_fetch_configs, + ): + mock_fetch_configs.return_value = ( + [{"type": "pii_remover", "stage": "input"}], + [], + ) + + request_data = { + "query": { + "input": { + "type": "audio", + "content": { + "format": "base64", + "value": "UklGRiQAAABXQVZFZm10IA==", + "mime_type": "audio/wav", + }, + } + }, + "config": { + "blob": { + "completion": { + "provider": "openai-native", + "type": "text", + "params": {"model": "gpt-4"}, + }, + "input_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_1} + ], + "output_guardrails": [], + } + }, + } + result = self._execute_job(job_for_execution, db, request_data) + + assert result["success"] is True + env["provider"].execute.assert_called_once() + mock_guardrails.assert_not_called() + def test_guardrails_sanitize_output_after_provider( self, db, job_env, job_for_execution ): @@ -850,6 +898,51 @@ def test_guardrails_sanitize_output_after_provider( assert "REDACTED" in result["data"]["response"]["output"]["content"]["value"] + def test_guardrails_skip_output_validation_for_audio_output( + self, db, job_env, job_for_execution + ): + env = job_env + + env["mock_llm_response"].response.output = AudioOutput( + content=AudioContent( + value="UklGRiQAAABXQVZFZm10IA==", + mime_type="audio/wav", + ) + ) + env["provider"].execute.return_value = (env["mock_llm_response"], None) + + with ( + patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, + patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, + ): + mock_fetch_configs.return_value = ( + [], + [{"type": "safety_filter", "stage": "output"}], + ) + + request_data = { + "query": {"input": "hello"}, + "config": { + "blob": { + "completion": { + "provider": "openai-native", + "type": "text", + "params": {"model": "gpt-4"}, + }, + "input_guardrails": [], + "output_guardrails": [ + {"validator_config_id": VALIDATOR_CONFIG_ID_2} + ], + } + }, + } + result = self._execute_job(job_for_execution, db, request_data) + + assert result["success"] is True + assert result["data"]["response"]["output"]["type"] == "audio" + env["provider"].execute.assert_called_once() + mock_guardrails.assert_not_called() + def test_guardrails_bypass_does_not_modify_input( self, db, job_env, job_for_execution ): @@ -937,7 +1030,7 @@ def test_guardrails_validation_failure_blocks_job( assert "Unsafe content" in result["error"] env["provider"].execute.assert_not_called() - def test_guardrails_rephrase_needed_blocks_job( + def test_guardrails_rephrase_needed_allows_job_with_sanitized_input( self, db, job_env, job_for_execution ): env = job_env @@ -977,8 +1070,10 @@ def test_guardrails_rephrase_needed_blocks_job( } result = self._execute_job(job_for_execution, db, request_data) - assert not result["success"] - env["provider"].execute.assert_not_called() + assert result["success"] is True + env["provider"].execute.assert_called_once() + provider_query = env["provider"].execute.call_args[0][1] + assert provider_query.input.content.value == "Rephrased text" def test_execute_job_fetches_validator_configs_from_blob_refs( self, db, job_env, job_for_execution @@ -1014,13 +1109,16 @@ def test_execute_job_fetches_validator_configs_from_blob_refs( assert result["success"] mock_fetch_configs.assert_called_once() _, kwargs = mock_fetch_configs.call_args - validator_configs = kwargs["validator_configs"] - assert [v.validator_config_id for v in validator_configs] == [ - UUID(VALIDATOR_CONFIG_ID_1), - UUID(VALIDATOR_CONFIG_ID_2), + input_validator_configs = kwargs["input_validator_configs"] + 
output_validator_configs = kwargs["output_validator_configs"] + assert [v.validator_config_id for v in input_validator_configs] == [ + UUID(VALIDATOR_CONFIG_ID_1) + ] + assert [v.validator_config_id for v in output_validator_configs] == [ + UUID(VALIDATOR_CONFIG_ID_2) ] - def test_execute_job_continues_when_validator_config_fetch_fails( + def test_execute_job_continues_when_no_validator_configs_resolved( self, db, job_env, job_for_execution ): env = job_env @@ -1030,7 +1128,7 @@ def test_execute_job_continues_when_validator_config_fetch_fails( patch("app.services.llm.jobs.list_validators_config") as mock_fetch_configs, patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails, ): - mock_fetch_configs.side_effect = Exception("validator service unavailable") + mock_fetch_configs.return_value = ([], []) request_data = { "query": {"input": "hello"}, From fc5424b2479777e2631cd8a92a9f3ab6c32e30bb Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 22:02:03 +0530 Subject: [PATCH 34/35] precommit --- backend/app/services/llm/guardrails.py | 10 ++++++++-- backend/app/tests/services/llm/test_guardrails.py | 8 ++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/backend/app/services/llm/guardrails.py b/backend/app/services/llm/guardrails.py index 3adec3e2..7ba8d72f 100644 --- a/backend/app/services/llm/guardrails.py +++ b/backend/app/services/llm/guardrails.py @@ -110,6 +110,7 @@ def list_validators_config( } endpoint = f"{settings.KAAPI_GUARDRAILS_URL}/validators/configs/" + def _build_params(validator_ids: list[UUID]) -> dict[str, Any]: params = { "organization_id": organization_id, @@ -120,6 +121,7 @@ def _build_params(validator_ids: list[UUID]) -> dict[str, Any]: try: with httpx.Client(timeout=10.0) as client: + def _fetch_by_ids(validator_ids: list[UUID]) -> list[dict[str, Any]]: if not validator_ids: return [] @@ -138,7 +140,9 @@ def _fetch_by_ids(validator_ids: list[UUID]) -> list[dict[str, Any]]: ) if not payload.get("success", False): - raise ValueError("Validator config fetch failed: `success` is false.") + raise ValueError( + "Validator config fetch failed: `success` is false." + ) validators = payload.get("data", []) if not isinstance(validators, list): @@ -146,7 +150,9 @@ def _fetch_by_ids(validator_ids: list[UUID]) -> list[dict[str, Any]]: "Invalid validators response format: `data` must be a list." 
) - return [validator for validator in validators if isinstance(validator, dict)] + return [ + validator for validator in validators if isinstance(validator, dict) + ] input_guardrails = _fetch_by_ids(input_validator_config_ids) output_guardrails = _fetch_by_ids(output_validator_config_ids) diff --git a/backend/app/tests/services/llm/test_guardrails.py b/backend/app/tests/services/llm/test_guardrails.py index 4cb0544f..16105698 100644 --- a/backend/app/tests/services/llm/test_guardrails.py +++ b/backend/app/tests/services/llm/test_guardrails.py @@ -147,7 +147,9 @@ def test_run_guardrails_validation_allows_disable_suppress_pass_logs( @patch("app.services.llm.guardrails.httpx.Client") -def test_list_validators_config_fetches_input_and_output_by_refs(mock_client_cls) -> None: +def test_list_validators_config_fetches_input_and_output_by_refs( + mock_client_cls, +) -> None: input_validator_configs = [Validator(validator_config_id=uuid.uuid4())] output_validator_configs = [Validator(validator_config_id=uuid.uuid4())] @@ -175,7 +177,9 @@ def test_list_validators_config_fetches_input_and_output_by_refs(mock_client_cls project_id=1, ) - assert input_guardrails == [{"type": "uli_slur_match", "config": {"severity": "high"}}] + assert input_guardrails == [ + {"type": "uli_slur_match", "config": {"severity": "high"}} + ] assert output_guardrails == [{"type": "gender_assumption_bias"}] assert mock_client.get.call_count == 2 From a1c934666eef04854302a0cee0b381dd6d1bfb64 Mon Sep 17 00:00:00 2001 From: rkritika1508 Date: Tue, 17 Feb 2026 22:07:42 +0530 Subject: [PATCH 35/35] fixed test --- backend/app/tests/services/llm/test_jobs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 0fe0459c..27bb0384 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -1034,6 +1034,7 @@ def test_guardrails_rephrase_needed_allows_job_with_sanitized_input( self, db, job_env, job_for_execution ): env = job_env + env["provider"].execute.return_value = (env["mock_llm_response"], None) with ( patch("app.services.llm.jobs.run_guardrails_validation") as mock_guardrails,