test: expand coverage to 70%+ — add utils, config, curator, proxy, integration tests
- Extend test_utils.py: filter_memories_by_time, merge_memories, calculate_token_budget, build_augmented_messages (mocked) - Extend test_config.py: Config.load() with TOML via tmp_path, CloudConfig helpers, env var api_key - Add test_curator.py: _parse_json_response, _is_recent, _format_raw_turns, _append_rule_to_file - Add test_proxy_handler.py: clean_message_content, handle_chat_non_streaming (mocked httpx+qdrant) - Add test_integration.py: health check, /api/tags, /api/chat non-streaming + streaming via TestClient - Add pytest.ini (asyncio_mode=auto), add pytest-cov to requirements.txt Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -39,4 +39,136 @@ class TestEmbeddingDims:
|
||||
|
||||
def test_mxbai_embed_large(self):
    """mxbai-embed-large should have 1024 dimensions."""
    # Single assertion — the original repeated this exact line twice,
    # which added no coverage.
    assert EMBEDDING_DIMS["mxbai-embed-large"] == 1024
|
||||
|
||||
|
||||
class TestConfigLoad:
    """Tests for Config.load() parsing real TOML content written to tmp_path."""

    def test_load_from_explicit_path(self, tmp_path):
        """Config.load() should parse a TOML file at an explicit path."""
        from app.config import Config

        config_file = tmp_path / "config.toml"
        config_file.write_text(
            '[general]\n'
            'ollama_host = "http://localhost:11434"\n'
            'qdrant_host = "http://localhost:6333"\n'
            'qdrant_collection = "test_memories"\n'
        )
        cfg = Config.load(str(config_file))
        assert cfg.ollama_host == "http://localhost:11434"
        assert cfg.qdrant_host == "http://localhost:6333"
        assert cfg.qdrant_collection == "test_memories"

    def test_load_layers_section(self, tmp_path):
        """Config.load() should parse [layers] section correctly."""
        from app.config import Config

        config_file = tmp_path / "config.toml"
        config_file.write_text(
            '[layers]\n'
            'semantic_token_budget = 5000\n'
            'context_token_budget = 3000\n'
            'semantic_score_threshold = 0.75\n'
        )
        cfg = Config.load(str(config_file))
        assert cfg.semantic_token_budget == 5000
        assert cfg.context_token_budget == 3000
        assert cfg.semantic_score_threshold == 0.75

    def test_load_curator_section(self, tmp_path):
        """Config.load() should parse [curator] section correctly."""
        from app.config import Config

        config_file = tmp_path / "config.toml"
        config_file.write_text(
            '[curator]\n'
            'run_time = "03:30"\n'
            'curator_model = "mixtral:8x22b"\n'
        )
        cfg = Config.load(str(config_file))
        assert cfg.run_time == "03:30"
        assert cfg.curator_model == "mixtral:8x22b"

    def test_load_cloud_section(self, tmp_path):
        """Config.load() should parse [cloud] section, including the nested
        [cloud.models] mapping table."""
        from app.config import Config

        config_file = tmp_path / "config.toml"
        config_file.write_text(
            '[cloud]\n'
            'enabled = true\n'
            'api_base = "https://openrouter.ai/api/v1"\n'
            'api_key_env = "MY_API_KEY"\n'
            '\n'
            '[cloud.models]\n'
            '"gpt-oss:120b" = "openai/gpt-4o"\n'
        )
        cfg = Config.load(str(config_file))
        assert cfg.cloud.enabled is True
        assert cfg.cloud.api_base == "https://openrouter.ai/api/v1"
        assert cfg.cloud.api_key_env == "MY_API_KEY"
        assert "gpt-oss:120b" in cfg.cloud.models

    def test_load_nonexistent_file_returns_defaults(self, tmp_path, monkeypatch):
        """Config.load() with a missing file should fall back to defaults."""
        from app.config import Config

        # Point the config dir at a location with no config.toml.
        # monkeypatch.setenv restores any pre-existing VERA_CONFIG_DIR after
        # the test; the original try/del clobbered an inherited value and
        # never put it back, leaking state into later tests.
        monkeypatch.setenv("VERA_CONFIG_DIR", str(tmp_path / "noconfig"))
        cfg = Config.load(str(tmp_path / "nonexistent.toml"))
        # Default host is project-specific (presumably set in Config defaults).
        assert cfg.ollama_host == "http://10.0.0.10:11434"
|
||||
|
||||
|
||||
class TestCloudConfig:
    """Exercise the CloudConfig helpers: model lookup and env-based api_key."""

    def test_is_cloud_model_true(self):
        """A name present in the models mapping is reported as a cloud model."""
        from app.config import CloudConfig

        mapping = {"gpt-oss:120b": "openai/gpt-4o"}
        cloud_cfg = CloudConfig(enabled=True, models=mapping)
        assert cloud_cfg.is_cloud_model("gpt-oss:120b") is True

    def test_is_cloud_model_false(self):
        """A name absent from the models mapping is not a cloud model."""
        from app.config import CloudConfig

        mapping = {"gpt-oss:120b": "openai/gpt-4o"}
        cloud_cfg = CloudConfig(enabled=True, models=mapping)
        assert cloud_cfg.is_cloud_model("llama3:70b") is False

    def test_get_cloud_model_existing(self):
        """Lookup of a registered name yields the mapped cloud model ID."""
        from app.config import CloudConfig

        cloud_cfg = CloudConfig(
            enabled=True, models={"gpt-oss:120b": "openai/gpt-4o"}
        )
        assert cloud_cfg.get_cloud_model("gpt-oss:120b") == "openai/gpt-4o"

    def test_get_cloud_model_missing(self):
        """Lookup of an unknown name yields None."""
        from app.config import CloudConfig

        cloud_cfg = CloudConfig(enabled=True, models={})
        assert cloud_cfg.get_cloud_model("unknown") is None

    def test_api_key_from_env(self, monkeypatch):
        """The api_key property resolves the configured environment variable."""
        from app.config import CloudConfig

        monkeypatch.setenv("MY_TEST_KEY", "sk-secret")
        assert CloudConfig(api_key_env="MY_TEST_KEY").api_key == "sk-secret"

    def test_api_key_missing_from_env(self, monkeypatch):
        """The api_key property is None when the env var is unset."""
        from app.config import CloudConfig

        monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
        assert CloudConfig(api_key_env="OPENROUTER_API_KEY").api_key is None
|
||||
Reference in New Issue
Block a user