Launch Week Day 1: Socket for Jira Is Now Available. Learn More
Socket
Book a DemoSign in
Socket

truthound

Package Overview
Dependencies
Maintainers
1
Versions
40
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

truthound - pypi Package Compare versions

Comparing version
3.1.0
to
3.1.1
docs/assets/truthound-dashboard-banner.png

Sorry, the diff of this file is not supported yet

+38
# Truthound 3.1.1 Release Notes
## Highlights
Truthound 3.1.1 is a patch release on top of the 3.1 line.
This release keeps the public AI review surface additive and stable while
exposing one small but important helper for downstream integrations that need
canonical source identity.
## What's New
- `truthound.ai.resolve_source_key(...)` is now a root-level public helper
- dashboard and other review-surface consumers can resolve canonical source
keys without reaching into `TruthoundContext` internals
- the rest of the `truthound.ai` lifecycle remains unchanged:
`suggest_suite(...)`, `explain_run(...)`, `approve_proposal(...)`,
`reject_proposal(...)`, `apply_proposal(...)`
## Compatibility
Truthound 3.1.1 is additive.
- importing `truthound` without AI dependencies still works
- `truthound[ai]` remains the optional installation path
- no core hot-path or workspace contract is broken by this patch release
## Upgrade Guidance
```bash
pip install --upgrade truthound
```
Optional AI support remains:
```bash
pip install --upgrade truthound[ai]
```
"""Public source-key helpers for ``truthound.ai`` consumers."""
from __future__ import annotations
from typing import Any
from truthound.context import TruthoundContext, get_context
def resolve_source_key(
data: Any = None,
source: Any = None,
*,
context: TruthoundContext | None = None,
) -> str:
"""Resolve the canonical source key for a dashboard or AI route consumer."""
active_context = context or get_context()
return active_context.resolve_source_key(data=data, source=source)
__all__ = ["resolve_source_key"]
from __future__ import annotations
import json
import shutil
from pathlib import Path
from typing import Any
import pytest
from pydantic import ValidationError
from typer.testing import CliRunner
import truthound as th
from truthound.cli import app
from truthound.context import TruthoundContext
pytest.importorskip("pydantic")
pytestmark = pytest.mark.contract
FIXTURE_ROOT = Path(__file__).parent / "fixtures" / "ai" / "phase4"
class FixtureProvider:
    """Deterministic stand-in provider that replays a canned JSON payload.

    Mimics the structured-output provider protocol: every call to
    ``generate_structured`` records the request and answers with the payload
    given at construction time, serialized as JSON.
    """

    provider_name = "fake-openai"
    api_key_env = None
    supports_structured_outputs = True
    default_model_name = "gpt-fake"

    def __init__(self, payload: Any) -> None:
        # Canned payload echoed back verbatim on every structured call.
        self.payload = payload
        # Every request object seen, kept for later inspection by tests.
        self.requests: list[Any] = []

    def generate_structured(self, request):
        """Record *request* and return a structured response wrapping the payload."""
        from truthound.ai import StructuredProviderResponse

        self.requests.append(request)
        serialized = json.dumps(self.payload, ensure_ascii=False)
        return StructuredProviderResponse(
            provider_name=self.provider_name,
            model_name=request.model_name,
            output_text=serialized,
            parsed_output=self.payload,
            usage={"prompt_tokens": 11, "completion_tokens": 22, "total_tokens": 33},
            finish_reason="stop",
        )
def _load_json_fixture(name: str) -> Any:
    """Parse and return the named JSON fixture from ``FIXTURE_ROOT``."""
    fixture_path = FIXTURE_ROOT / name
    return json.loads(fixture_path.read_text(encoding="utf-8"))
def _copy_orders_csv(tmp_path: Path) -> Path:
    """Copy the demo orders CSV fixture into *tmp_path* and return the copy's path."""
    destination = tmp_path / "orders-demo.csv"
    shutil.copyfile(FIXTURE_ROOT / "orders-demo.csv", destination)
    return destination
def _snapshot_core_state(context: TruthoundContext) -> dict[str, str]:
snapshot: dict[str, str] = {}
for directory in (context.catalog_dir, context.baselines_dir, context.runs_dir, context.docs_dir):
for path in sorted(directory.rglob("*")):
if path.is_file():
snapshot[str(path.relative_to(context.workspace_dir))] = path.read_text(encoding="utf-8")
return snapshot
def _normalize_source_key(value: str) -> str:
candidate = Path(value)
if candidate.name:
return candidate.name
return value
def _normalize_input_ref(ref: dict[str, Any]) -> dict[str, Any]:
payload = {
"kind": ref["kind"],
"ref": ref["ref"],
"redacted": ref["redacted"],
"metadata": ref["metadata"],
}
if payload["ref"].startswith("schema-summary:"):
payload["ref"] = f"schema-summary:{Path(payload['ref'].split(':', 1)[1]).name}"
elif payload["ref"].startswith("suite-summary:"):
payload["ref"] = f"suite-summary:{Path(payload['ref'].split(':', 1)[1]).name}"
elif payload["ref"].startswith("runs:"):
payload["ref"] = "runs:<run_id>"
elif payload["ref"].startswith("docs:"):
payload["ref"] = "docs:<run_id>"
return payload
def _normalize_proposal_snapshot(artifact) -> dict[str, Any]:
    """Project a suite-proposal artifact onto a stable, fixture-comparable dict.

    Volatile fields (tmp-path-based source keys, run-specific refs) are
    normalized via the ``_normalize_*`` helpers so the result can be compared
    against a checked-in golden JSON snapshot.
    """
    return {
        "schema_version": str(artifact.schema_version),
        # Enum members collapse to their raw value; plain strings pass through.
        "artifact_type": getattr(artifact.artifact_type, "value", str(artifact.artifact_type)),
        # Path-based source keys reduce to their basename.
        "source_key": _normalize_source_key(str(artifact.source_key)),
        "input_refs": [
            _normalize_input_ref(item.model_dump(mode="json"))
            for item in artifact.input_refs
        ],
        "model_provider": str(artifact.model_provider),
        "model_name": str(artifact.model_name),
        "compiler_version": str(artifact.compiler_version),
        "approval_status": getattr(artifact.approval_status, "value", str(artifact.approval_status)),
        "target_type": str(artifact.target_type),
        "summary": str(artifact.summary),
        "rationale": str(artifact.rationale),
        "compile_status": str(artifact.compile_status),
        "compiled_check_count": int(artifact.compiled_check_count),
        "rejected_check_count": int(artifact.rejected_check_count),
        # Only the identity-relevant fields of each compiled check are kept.
        "compiled_checks": [
            {
                "validator_name": check.validator_name,
                "category": check.category,
                "columns": list(check.columns),
                "params": dict(check.params),
            }
            for check in artifact.checks
        ],
        "diff_counts": artifact.diff_preview.counts.model_dump(mode="json"),
        "added_checks": [
            {
                "validator_name": item.validator_name,
                "columns": list(item.columns),
            }
            for item in artifact.diff_preview.added
        ],
        "already_present_validators": [
            item.validator_name for item in artifact.diff_preview.already_present
        ],
        # A missing existing-suite summary is treated as an empty check list.
        "existing_suite_validator_names": [
            item["validator_name"]
            for item in (artifact.existing_suite_summary or {}).get("checks", [])
        ],
        "risks": list(artifact.risks),
        "compiler_errors": list(artifact.compiler_errors),
    }
def _normalize_analysis_snapshot(artifact) -> dict[str, Any]:
    """Project a run-analysis artifact onto a stable, fixture-comparable dict.

    Run-specific identifiers are replaced with ``<run_id>`` placeholders so
    the output can be compared against a golden snapshot.
    """
    normalized_refs = [
        _normalize_input_ref(entry.model_dump(mode="json"))
        for entry in artifact.input_refs
    ]
    evidence = []
    for ref in artifact.evidence_refs:
        evidence.append("runs:<run_id>" if ref.startswith("runs:") else ref)
    window = dict(artifact.history_window)
    if window.get("latest_run_id"):
        window["latest_run_id"] = "<run_id>"
    snapshot = {
        "schema_version": str(artifact.schema_version),
        "artifact_type": getattr(artifact.artifact_type, "value", str(artifact.artifact_type)),
        "source_key": str(artifact.source_key),
        "input_refs": normalized_refs,
        "model_provider": str(artifact.model_provider),
        "model_name": str(artifact.model_name),
        "compiler_version": str(artifact.compiler_version),
        "approval_status": getattr(artifact.approval_status, "value", str(artifact.approval_status)),
        "summary": str(artifact.summary),
        "evidence_refs": evidence,
        "failed_checks": list(artifact.failed_checks),
        "top_columns": list(artifact.top_columns),
        "recommended_next_actions": list(artifact.recommended_next_actions),
        "history_window": window,
    }
    return snapshot
def _make_ready_cli_proposal(tmp_path: Path, *, source_key: str):
    """Build a fully-compiled, ready-to-approve suite proposal for CLI tests.

    The proposal adds one ``between`` check on ``refund_rate`` on top of an
    existing single-check suite, so the approve/apply CLI flow exercises a
    real, non-empty diff.
    """
    from truthound._applied_suite import canonical_check_key
    from truthound.ai import (
        CompiledProposalCheck,
        InputRef,
        SuiteCheckSnapshot,
        SuiteProposalArtifact,
        ValidationSuiteDiffCounts,
        ValidationSuiteDiffPreview,
        ValidationSuiteSnapshot,
    )

    # Canonical identity for the newly proposed check.
    check_key = canonical_check_key(
        validator_name="between",
        columns=["refund_rate"],
        params={"min_value": 0, "max_value": 1},
    )
    # The check already present in the current suite.
    current_check = SuiteCheckSnapshot(
        check_id="null",
        check_key="null||{}",
        validator_name="null",
        category="completeness",
        columns=[],
        params={},
        tags=["completeness"],
        rationale="Always validate completeness across discovered columns.",
        origin="current",
    )
    # The model-proposed addition.
    added_check = SuiteCheckSnapshot(
        check_id=check_key,
        check_key=check_key,
        validator_name="between",
        category="distribution",
        columns=["refund_rate"],
        params={"min_value": 0, "max_value": 1},
        tags=["distribution"],
        rationale="Refund rate should stay between zero and one.",
        origin="proposal",
    )
    # Current suite (1 check) vs proposed suite (2 checks): exactly one addition.
    diff_preview = ValidationSuiteDiffPreview(
        current_suite=ValidationSuiteSnapshot(
            suite_name="truthound-auto-suite",
            check_count=1,
            schema_check_present=False,
            evidence_mode="summary",
            min_severity=None,
            checks=[current_check],
        ),
        proposed_suite=ValidationSuiteSnapshot(
            suite_name="truthound-auto-suite",
            check_count=2,
            schema_check_present=False,
            evidence_mode="summary",
            min_severity=None,
            checks=[current_check, added_check],
        ),
        added=[added_check],
        counts=ValidationSuiteDiffCounts(added=1, already_present=0, conflicts=0, rejected=0),
    )
    return SuiteProposalArtifact(
        source_key=source_key,
        input_refs=[
            InputRef(
                kind="schema_summary",
                ref=f"schema-summary:{source_key}",
                hash="schemahash-phase4",
                redacted=True,
                metadata={"column_count": 5, "observed_count": 3},
            )
        ],
        model_provider="fake-openai",
        model_name="gpt-fake",
        prompt_hash="phase4-cli-fingerprint",
        created_by="phase4-cli-test",
        workspace_root=str(tmp_path),
        summary="CLI proposal for refund rate hardening.",
        rationale="Exercise the operator review path end to end.",
        checks=[
            CompiledProposalCheck(
                check_key=check_key,
                validator_name="between",
                category="distribution",
                columns=["refund_rate"],
                params={"min_value": 0, "max_value": 1},
                rationale="Refund rate should stay between zero and one.",
            )
        ],
        risks=[],
        # "ready" marks the proposal as fully compiled and eligible for apply.
        compile_status="ready",
        diff_preview=diff_preview,
        compiled_check_count=1,
        rejected_check_count=0,
        compiler_errors=[],
    )
def _make_cli_analysis(tmp_path: Path, *, run_id: str, source_key: str):
    """Build a run-analysis artifact tied to *run_id* for CLI smoke tests."""
    from truthound._ai_contract import analysis_artifact_id_for_run
    from truthound.ai import InputRef, RunAnalysisArtifact

    return RunAnalysisArtifact(
        # Deterministic id derived from the run so show/list commands can find it.
        artifact_id=analysis_artifact_id_for_run(run_id),
        source_key=source_key,
        input_refs=[
            InputRef(
                kind="run_result",
                ref=f"runs:{run_id}",
                hash="runhash-phase4",
                redacted=True,
                metadata={"status": "failure", "issue_count": 2, "failed_check_count": 2},
            ),
            InputRef(
                kind="history_window",
                ref=f"history-window:{source_key}",
                hash="historyhash-phase4",
                redacted=True,
                metadata={"included": True, "run_count": 1, "failure_count": 1},
            ),
        ],
        model_provider="fake-openai",
        model_name="gpt-fake",
        prompt_hash="phase4-analysis-fingerprint",
        created_by="phase4-cli-test",
        workspace_root=str(tmp_path),
        run_id=run_id,
        summary="CLI analysis highlights the duplicate identifier issue.",
        # Every evidence ref points at inputs that actually back the analysis.
        evidence_refs=[f"runs:{run_id}", f"history-window:{source_key}"],
        failed_checks=["unique", "null"],
        top_columns=["customer_id", "email"],
        recommended_next_actions=["Confirm whether customer_id is a stable business key."],
        history_window={
            "included": True,
            "history_key": source_key,
            "window_size": 10,
            "run_count": 1,
            "failure_count": 1,
            "success_count": 0,
            "latest_run_id": run_id,
            "recent_statuses": ["failure"],
        },
    )
def test_phase4_compiler_golden_snapshot_matches_normalized_fixture(tmp_path: Path) -> None:
    """A happy-path proposal compile must match the checked-in golden snapshot."""
    from truthound.ai import suggest_suite

    data_path = _copy_orders_csv(tmp_path)
    context = TruthoundContext(tmp_path)
    artifact = suggest_suite(
        prompt="Keep order ids unique and refund metrics bounded.",
        data=str(data_path),
        context=context,
        provider=FixtureProvider(_load_json_fixture("happy_path_proposal_response.json")),
    )
    # Normalization strips tmp-path noise so the comparison is deterministic.
    expected = _load_json_fixture("expected_normalized_proposal_snapshot.json")
    assert _normalize_proposal_snapshot(artifact) == expected
def test_phase4_malformed_provider_output_is_safely_rejected_without_core_state_pollution(
    tmp_path: Path,
) -> None:
    """Malformed model output yields a rejected artifact and leaves core state untouched."""
    from truthound.ai import suggest_suite

    data_path = _copy_orders_csv(tmp_path)
    context = TruthoundContext(tmp_path)
    # Snapshot the core workspace before the call so any write is detectable.
    before = _snapshot_core_state(context)
    artifact = suggest_suite(
        prompt="Return malformed output on purpose.",
        data=str(data_path),
        context=context,
        provider=FixtureProvider(_load_json_fixture("malformed_provider_output.json")),
    )
    # Nothing compiles from malformed output; the single failure is surfaced
    # as a compiler error rather than per-item rejections.
    assert str(artifact.compile_status) == "rejected"
    assert artifact.compiled_check_count == 0
    assert artifact.rejected_check_count == 0
    assert artifact.diff_preview.counts.added == 0
    assert artifact.rejected_items == []
    assert artifact.compiler_errors == ["provider_output_validation_failed"]
    # Catalog, baselines, runs and docs must be byte-identical to before.
    assert _snapshot_core_state(context) == before
def test_phase4_unsupported_intent_and_unsafe_regex_are_safely_rejected(tmp_path: Path) -> None:
    """Unsupported intents and unsafe regexes are dropped while safe checks still compile."""
    from truthound.ai import suggest_suite

    data_path = _copy_orders_csv(tmp_path)
    context = TruthoundContext(tmp_path)
    before = _snapshot_core_state(context)
    artifact = suggest_suite(
        prompt="Keep order ids unique and reject unsafe suggestions.",
        data=str(data_path),
        context=context,
        provider=FixtureProvider(_load_json_fixture("unsupported_or_unsafe_response.json")),
    )
    # One safe check survives compilation; three model suggestions are rejected.
    assert str(artifact.compile_status) == "partial"
    assert artifact.compiled_check_count == 1
    assert artifact.rejected_check_count == 3
    assert artifact.diff_preview.counts.added == 1
    reasons = {item.reason for item in artifact.rejected_items}
    assert any("unsupported intent" in reason for reason in reasons)
    assert any("regex pattern is not allowed" in reason for reason in reasons)
    # At least one rejection must be attributed to the model's own output.
    assert any(item.source == "model" for item in artifact.rejected_items)
    # Partial compilation must not leak into persisted core workspace state.
    assert _snapshot_core_state(context) == before
def test_phase4_analysis_golden_snapshot_matches_normalized_fixture(tmp_path: Path) -> None:
    """Run-analysis output must match the checked-in normalized golden snapshot."""
    from truthound.ai import explain_run

    context = TruthoundContext(tmp_path)
    # Produce a real failing run (duplicate id, null email) to explain.
    run_result = th.check(
        {"customer_id": [1, 2, 2], "email": ["a@example.com", None, "c@example.com"]},
        context=context,
    )
    payload = _load_json_fixture("happy_path_analysis_response.json")
    # Evidence refs must point at the run actually produced above.
    payload["evidence_refs"] = [
        f"runs:{run_result.run_id}",
        f"history-window:{run_result.metadata['context_history_key']}",
    ]
    artifact = explain_run(
        run=run_result,
        context=context,
        provider=FixtureProvider(payload),
    )
    expected = _load_json_fixture("expected_normalized_analysis_snapshot.json")
    assert _normalize_analysis_snapshot(artifact) == expected
def test_phase4_analysis_without_evidence_refs_is_rejected_and_not_persisted(tmp_path: Path) -> None:
    """An analysis lacking evidence refs fails provider validation and is never stored."""
    from truthound.ai import AIArtifactStore, explain_run
    from truthound.ai.providers import ProviderResponseError

    context = TruthoundContext(tmp_path)
    run_result = th.check(
        {"customer_id": [1, 2, 2], "email": ["a@example.com", None, "c@example.com"]},
        context=context,
    )
    with pytest.raises(ProviderResponseError, match="response validation failed"):
        explain_run(
            run=run_result,
            context=context,
            provider=FixtureProvider(_load_json_fixture("analysis_without_evidence_refs.json")),
        )
    # The rejected analysis must leave the artifact store empty.
    assert AIArtifactStore(context).list_analyses() == []
def test_phase4_redaction_privacy_invariants_block_pii_and_sample_payloads(tmp_path: Path) -> None:
    """Artifacts carrying PII-like text or row-level samples must fail model validation."""
    from truthound._ai_contract import analysis_artifact_id_for_run
    from truthound.ai import (
        InputRef,
        # NOTE(review): RedactionViolationError is imported but unused here;
        # both guards surface as pydantic ValidationError instead.
        RedactionViolationError,
        RejectedProposalItem,
        RunAnalysisArtifact,
        SuiteProposalArtifact,
    )

    context = TruthoundContext(tmp_path)
    # An email literal in the summary trips the PII guard at construction time.
    with pytest.raises(ValidationError, match="text contains PII-like literal content"):
        SuiteProposalArtifact(
            source_key="source:orders",
            input_refs=[
                InputRef(
                    kind="schema_summary",
                    ref="schema-summary:source:orders",
                    hash="hash001",
                    redacted=True,
                    metadata={"column_count": 5, "observed_count": 3},
                )
            ],
            model_provider="fake-openai",
            model_name="gpt-fake",
            prompt_hash="phase4-redaction",
            created_by="phase4-test",
            workspace_root=str(tmp_path),
            summary="Please investigate alice@example.com anomalies.",
            rationale="This should never persist.",
            compile_status="rejected",
            rejected_items=[
                RejectedProposalItem(
                    source="compiler",
                    intent="provider_output_validation_failed",
                    columns=[],
                    params={},
                    reason="Reject unsafe payloads",
                )
            ],
        )
    # Row-level sample payloads in recommendations are likewise rejected.
    with pytest.raises(ValidationError, match="row-level sample"):
        RunAnalysisArtifact(
            artifact_id=analysis_artifact_id_for_run("run-phase4"),
            source_key="dict:customer_id:email",
            input_refs=[
                InputRef(
                    kind="run_result",
                    ref="runs:run-phase4",
                    hash="runhash",
                    redacted=True,
                    metadata={"status": "failure", "issue_count": 2, "failed_check_count": 2},
                ),
                InputRef(
                    kind="history_window",
                    ref="history-window:dict:customer_id:email",
                    hash="historyhash",
                    redacted=True,
                    metadata={"included": True, "run_count": 1, "failure_count": 1},
                ),
            ],
            model_provider="fake-openai",
            model_name="gpt-fake",
            prompt_hash="phase4-redaction",
            created_by="phase4-test",
            workspace_root=str(tmp_path),
            run_id="run-phase4",
            summary="Summary only",
            evidence_refs=["runs:run-phase4"],
            failed_checks=["unique"],
            top_columns=["customer_id"],
            recommended_next_actions=["row=[customer_id=2,status=pending]"],
            history_window={
                "included": True,
                "history_key": "dict:customer_id:email",
                "window_size": 10,
                "run_count": 1,
                "failure_count": 1,
                "success_count": 0,
                "latest_run_id": "run-phase4",
                "recent_statuses": ["failure"],
            },
        )
def test_phase4_truthound_ai_cli_smoke_tracks_structured_side_effects(
    tmp_path: Path,
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """End-to-end CLI smoke: suggest, list/show, approve, apply, history, explain.

    Provider calls are faked, but the fakes write real artifacts through
    ``AIArtifactStore`` so the CLI's read paths and structured side effects
    (artifacts, approval log, applied suite) are exercised honestly.
    """
    import truthound.ai as ai_namespace
    from truthound.ai import AIArtifactStore
    from truthound.cli_modules import ai as ai_cli_module

    context = TruthoundContext(tmp_path)
    source_path = _copy_orders_csv(tmp_path)
    source_key = context.resolve_source_key(data=str(source_path))
    run_result = th.check(
        {"customer_id": [1, 2, 2], "email": ["a@example.com", None, "c@example.com"]},
        context=context,
    )

    # Fake the AI entry points while persisting real artifacts.
    def fake_suggest_suite(*, prompt: str, data: Any = None, source: Any = None, context: Any = None, **_: Any):
        artifact = _make_ready_cli_proposal(tmp_path, source_key=context.resolve_source_key(data=data, source=source))
        AIArtifactStore(context).write_proposal(artifact)
        return artifact

    def fake_explain_run(*, run_id: str, context: Any = None, **_: Any):
        artifact = _make_cli_analysis(
            tmp_path,
            run_id=run_id,
            source_key=context.resolve_source_key(data=run_result.source),
        )
        AIArtifactStore(context).write_analysis(artifact)
        return artifact

    # The CLI module resolves its own context; pin it to the test workspace.
    monkeypatch.setattr(ai_cli_module, "get_context", lambda: context)
    monkeypatch.setattr(ai_namespace, "suggest_suite", fake_suggest_suite)
    monkeypatch.setattr(ai_namespace, "explain_run", fake_explain_run)

    runner = CliRunner()
    suggest_result = runner.invoke(
        app,
        [
            "ai",
            "suggest-suite",
            str(source_path),
            "--prompt",
            "Propose a refund-rate bound check.",
            "--json",
        ],
    )
    assert suggest_result.exit_code == 0, suggest_result.stdout
    proposal_payload = json.loads(suggest_result.stdout)
    proposal_id = proposal_payload["artifact_id"]

    list_result = runner.invoke(app, ["ai", "proposals", "list", "--json"])
    assert list_result.exit_code == 0, list_result.stdout
    assert json.loads(list_result.stdout)[0]["artifact_id"] == proposal_id

    show_result = runner.invoke(app, ["ai", "proposals", "show", proposal_id, "--json"])
    assert show_result.exit_code == 0, show_result.stdout
    assert json.loads(show_result.stdout)["compile_status"] == "ready"

    approve_result = runner.invoke(
        app,
        [
            "ai",
            "proposals",
            "approve",
            proposal_id,
            "--actor-id",
            "user-001",
            "--actor-name",
            "Truthound Operator",
            "--comment",
            "Looks safe to apply.",
            "--json",
        ],
    )
    assert approve_result.exit_code == 0, approve_result.stdout
    assert json.loads(approve_result.stdout)["proposal"]["approval_status"] == "approved"

    apply_result = runner.invoke(
        app,
        [
            "ai",
            "proposals",
            "apply",
            proposal_id,
            "--actor-id",
            "user-001",
            "--actor-name",
            "Truthound Operator",
            "--comment",
            "Apply to the active suite.",
            "--yes",
            "--json",
        ],
    )
    assert apply_result.exit_code == 0, apply_result.stdout
    assert json.loads(apply_result.stdout)["proposal"]["approval_status"] == "applied"

    history_result = runner.invoke(app, ["ai", "proposals", "history", proposal_id, "--json"])
    assert history_result.exit_code == 0, history_result.stdout
    history = json.loads(history_result.stdout)
    # History lists apply before approve — presumably newest-first ordering;
    # confirm against the CLI history contract if this ever changes.
    assert [item["action"] for item in history] == ["apply", "approve"]

    explain_result = runner.invoke(
        app,
        [
            "ai",
            "explain-run",
            "--run-id",
            run_result.run_id,
            "--json",
        ],
    )
    assert explain_result.exit_code == 0, explain_result.stdout
    analysis_payload = json.loads(explain_result.stdout)
    analysis_id = analysis_payload["artifact_id"]

    analysis_show = runner.invoke(app, ["ai", "analyses", "show", analysis_id, "--json"])
    assert analysis_show.exit_code == 0, analysis_show.stdout
    assert json.loads(analysis_show.stdout)["artifact_id"] == analysis_id

    # One approve event plus one apply event in the append-only approval log.
    approval_log = (context.workspace_dir / "ai" / "approvals" / "approval-log.jsonl")
    assert approval_log.exists()
    assert len(approval_log.read_text(encoding="utf-8").splitlines()) == 2

    # Applying the proposal must have recorded it as the active applied suite.
    applied_suite = context.read_applied_suite(source_key=source_key)
    assert applied_suite is not None
    assert applied_suite.proposal_id == proposal_id
+0
-6

@@ -33,8 +33,2 @@ name: Docs

- name: Checkout truthound-dashboard docs source
uses: actions/checkout@v4
with:
repository: seadonggyun4/truthound-dashboard
path: .external/truthound-dashboard
- uses: actions/setup-python@v5

@@ -41,0 +35,0 @@ with:

+1
-1

@@ -65,2 +65,2 @@ # Truthound AI

- [Truthound Dashboard](../dashboard/index.md)
- [Truthound 3.1 Release Notes](../releases/truthound-3.1.md)
- [Truthound 3.1.1 Release Notes](../releases/truthound-3.1.1.md)
<div align="center">
<img width="560" alt="Truthound Banner" src="../assets/truthound_banner.png" />
<img width="720" alt="Truthound Dashboard Banner" src="../assets/truthound-dashboard-banner.png" />
</div>

@@ -4,0 +4,0 @@

@@ -113,3 +113,3 @@ <div align="center">

- [Truthound Dashboard](dashboard/index.md)
- [Release Notes](releases/truthound-3.1.md)
- [Release Notes](releases/truthound-3.1.1.md)
- [Migration to 3.0](guides/migration-3.0.md)
+14
-17
Metadata-Version: 2.4
Name: truthound
Version: 3.1.0
Version: 3.1.1
Summary: Zero-Configuration Data Quality Framework Powered by Polars

@@ -149,3 +149,3 @@ Project-URL: Homepage, https://github.com/seadonggyun4/Truthound

> Truthound 3.1.0 is a layered data quality system built around a Polars-first validation kernel, with first-party orchestration adapters, an additive AI review surface, and an operational console built on top of the same core runtime contract.
> Truthound 3.1.1 is a layered data quality system built around a Polars-first validation kernel, with first-party orchestration adapters, an additive AI review surface, and an operational console built on top of the same core runtime contract.

@@ -160,3 +160,3 @@ ---

Truthound 3.1.0 is a layered data quality system. At the center is a small, durable, Polars-first validation kernel. Around that core sit an additive `truthound.ai` review surface, Truthound Orchestration for host-native execution inside schedulers and workflow systems, and Truthound Dashboard for operating Truthound through an installation-managed control-plane UI.
Truthound 3.1.1 is a layered data quality system. At the center is a small, durable, Polars-first validation kernel. Around that core sit an additive `truthound.ai` review surface, Truthound Orchestration for host-native execution inside schedulers and workflow systems, and Truthound Dashboard for operating Truthound through an installation-managed control-plane UI.

@@ -167,16 +167,13 @@ The point of the 3.x reset is not to hide the broader product line. It is to make the system boundary honest. The core validation kernel is the most rigorously validated contract in the ecosystem, while the AI review layer, orchestration adapters, and dashboard build on top of that contract instead of redefining it.

## What's New In 3.1.0
## What's New In 3.1.1
Truthound 3.1.0 keeps the 3.0 kernel boundary and adds the first complete
public AI review surface.
Truthound 3.1.1 keeps the 3.1 review surface intact and tightens one small but
important public integration edge for downstream consumers.
- `truthound.ai` is now the canonical optional namespace for proposal
generation, run analysis, approval history, and controlled apply
- root feature probes `has_ai_support()` and `get_ai_support_status()` make it
safe for downstream integrations to feature-gate AI functionality
- the AI lifecycle is explicit: `suggest_suite(...)`, `explain_run(...)`,
`approve_proposal(...)`, `reject_proposal(...)`, `apply_proposal(...)`
- live smoke runners now exist for both proposal generation and run analysis
- the public docs portal now documents `Truthound AI` directly and keeps the
dashboard at a boundary-level overview instead of a mirrored manual
- `truthound.ai.resolve_source_key(...)` is now a root-level public helper for
dashboard and review-surface consumers
- downstream integrations can resolve canonical source identity without
reaching into `TruthoundContext` implementation details
- the additive AI boundary remains unchanged: optional install, explicit review
lifecycle, persisted proposal and analysis artifacts, and controlled apply

@@ -255,3 +252,3 @@ ## Truthound Product Line

The practical 3.1.0 additions on top of that kernel are:
The practical 3.1 additions on top of that kernel are:

@@ -410,3 +407,3 @@ - optional AI dependency bundle: `truthound[ai]`

- Dashboard layer: [truthound.netlify.app/dashboard/](https://truthound.netlify.app/dashboard/)
- Release notes: [docs/releases/truthound-3.1.md](docs/releases/truthound-3.1.md)
- Release notes: [docs/releases/truthound-3.1.1.md](docs/releases/truthound-3.1.1.md)
- Latest verified benchmark summary: [docs/releases/latest-benchmark-summary.md](docs/releases/latest-benchmark-summary.md)

@@ -413,0 +410,0 @@ - Migration guide: [docs/guides/migration-3.0.md](docs/guides/migration-3.0.md)

@@ -7,3 +7,3 @@ [build-system]

name = "truthound"
version = "3.1.0"
version = "3.1.1"
description = "Zero-Configuration Data Quality Framework Powered by Polars"

@@ -10,0 +10,0 @@ readme = "README.md"

+13
-16

@@ -27,3 +27,3 @@ <div align="center">

> Truthound 3.1.0 is a layered data quality system built around a Polars-first validation kernel, with first-party orchestration adapters, an additive AI review surface, and an operational console built on top of the same core runtime contract.
> Truthound 3.1.1 is a layered data quality system built around a Polars-first validation kernel, with first-party orchestration adapters, an additive AI review surface, and an operational console built on top of the same core runtime contract.

@@ -38,3 +38,3 @@ ---

Truthound 3.1.0 is a layered data quality system. At the center is a small, durable, Polars-first validation kernel. Around that core sit an additive `truthound.ai` review surface, Truthound Orchestration for host-native execution inside schedulers and workflow systems, and Truthound Dashboard for operating Truthound through an installation-managed control-plane UI.
Truthound 3.1.1 is a layered data quality system. At the center is a small, durable, Polars-first validation kernel. Around that core sit an additive `truthound.ai` review surface, Truthound Orchestration for host-native execution inside schedulers and workflow systems, and Truthound Dashboard for operating Truthound through an installation-managed control-plane UI.

@@ -45,16 +45,13 @@ The point of the 3.x reset is not to hide the broader product line. It is to make the system boundary honest. The core validation kernel is the most rigorously validated contract in the ecosystem, while the AI review layer, orchestration adapters, and dashboard build on top of that contract instead of redefining it.

## What's New In 3.1.0
## What's New In 3.1.1
Truthound 3.1.0 keeps the 3.0 kernel boundary and adds the first complete
public AI review surface.
Truthound 3.1.1 keeps the 3.1 review surface intact and tightens one small but
important public integration edge for downstream consumers.
- `truthound.ai` is now the canonical optional namespace for proposal
generation, run analysis, approval history, and controlled apply
- root feature probes `has_ai_support()` and `get_ai_support_status()` make it
safe for downstream integrations to feature-gate AI functionality
- the AI lifecycle is explicit: `suggest_suite(...)`, `explain_run(...)`,
`approve_proposal(...)`, `reject_proposal(...)`, `apply_proposal(...)`
- live smoke runners now exist for both proposal generation and run analysis
- the public docs portal now documents `Truthound AI` directly and keeps the
dashboard at a boundary-level overview instead of a mirrored manual
- `truthound.ai.resolve_source_key(...)` is now a root-level public helper for
dashboard and review-surface consumers
- downstream integrations can resolve canonical source identity without
reaching into `TruthoundContext` implementation details
- the additive AI boundary remains unchanged: optional install, explicit review
lifecycle, persisted proposal and analysis artifacts, and controlled apply

@@ -133,3 +130,3 @@ ## Truthound Product Line

The practical 3.1.0 additions on top of that kernel are:
The practical 3.1 additions on top of that kernel are:

@@ -288,3 +285,3 @@ - optional AI dependency bundle: `truthound[ai]`

- Dashboard layer: [truthound.netlify.app/dashboard/](https://truthound.netlify.app/dashboard/)
- Release notes: [docs/releases/truthound-3.1.md](docs/releases/truthound-3.1.md)
- Release notes: [docs/releases/truthound-3.1.1.md](docs/releases/truthound-3.1.1.md)
- Latest verified benchmark summary: [docs/releases/latest-benchmark-summary.md](docs/releases/latest-benchmark-summary.md)

@@ -291,0 +288,0 @@ - Migration guide: [docs/guides/migration-3.0.md](docs/guides/migration-3.0.md)

@@ -9,3 +9,2 @@ #!/bin/bash

# ./scripts/fetch-external-docs.sh orchestration # sync only orchestration
# ./scripts/fetch-external-docs.sh dashboard # sync only dashboard

@@ -22,3 +21,2 @@ set -e

"orchestration|https://github.com/seadonggyun4/truthound-orchestration.git|docs/orchestration|public_nav"
"dashboard|https://github.com/seadonggyun4/truthound-dashboard.git|docs/dashboard|docs_tree"
# Add new repos here:

@@ -25,0 +23,0 @@ # "new-repo|https://github.com/seadonggyun4/truthound-new-repo.git|docs/new-repo|docs_tree"

@@ -67,2 +67,3 @@ """Optional Truthound AI namespace.

)
from truthound.ai.resolution import resolve_source_key
from truthound.ai.redaction import (

@@ -130,2 +131,3 @@ RedactionViolation,

"reject_proposal",
"resolve_source_key",
"show_analysis",

@@ -132,0 +134,0 @@ "show_proposal",

@@ -167,2 +167,3 @@ from __future__ import annotations

assert callable(ai_namespace.list_proposal_approval_events)
assert callable(ai_namespace.resolve_source_key)
assert issubclass(ai_namespace.ProviderConfigurationError, Exception)

@@ -173,2 +174,13 @@ assert issubclass(ai_namespace.ProviderTransportError, Exception)

def test_root_ai_resolve_source_key_matches_truthound_context(tmp_path: Path):
import truthound.ai as ai_namespace
context = TruthoundContext(tmp_path)
data = {"order_id": [1, 2, 3], "status": ["pending", "approved", "pending"]}
assert ai_namespace.resolve_source_key(data=data, context=context) == context.resolve_source_key(
data=data
)
def test_proposal_lifecycle_transitions_and_history_are_recorded(tmp_path: Path):

@@ -175,0 +187,0 @@ from truthound.ai import AIArtifactStore, ActorRef, approve_proposal, list_proposal_approval_events, reject_proposal

@@ -360,2 +360,22 @@ from __future__ import annotations

@pytest.mark.contract
def test_docs_workflow_does_not_checkout_dashboard_repo() -> None:
workflow_path = (
Path(__file__).resolve().parents[1]
/ ".github"
/ "workflows"
/ "docs.yml"
)
workflow = yaml.safe_load(workflow_path.read_text(encoding="utf-8"))
assert workflow["name"] == "Docs"
steps = workflow["jobs"]["docs"]["steps"]
rendered = yaml.safe_dump(steps, sort_keys=False)
assert "seadonggyun4/truthound-dashboard" not in rendered
assert ".external/truthound-dashboard" not in rendered
assert "prepare_public_docs.py --mode full" in rendered
assert "prepare_public_docs.py --mode public" in rendered
@pytest.mark.contract
def test_ruff_ratchet_manifest_tracks_clean_boundaries():

@@ -362,0 +382,0 @@ targets = _load_ruff_ratchet_manifest()

Sorry, the diff of this file is too big to display