Initial commit: workspace setup with skills, memory, config
This commit is contained in:
43
skills/perplexity/SKILL.md
Normal file
43
skills/perplexity/SKILL.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Perplexity API Skill
|
||||
|
||||
Perplexity AI API integration for OpenClaw. Provides search-enhanced LLM responses with citations.
|
||||
|
||||
## API Details
|
||||
|
||||
- **Endpoint**: `https://api.perplexity.ai/chat/completions`
|
||||
- **Key**: Stored in `config.json` (keep this file out of version control — never commit real API keys)
|
||||
- **Models**: sonar, sonar-pro, sonar-reasoning, sonar-deep-research
|
||||
- **Format**: OpenAI-compatible
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from skills.perplexity.scripts.query import query_perplexity
|
||||
|
||||
# Simple query
|
||||
response = query_perplexity("What is quantum computing?")
|
||||
|
||||
# With citations
|
||||
response = query_perplexity("Latest AI news", include_citations=True)
|
||||
|
||||
# Specific model
|
||||
response = query_perplexity("Complex research question", model="sonar-deep-research")
|
||||
```
|
||||
|
||||
## Models
|
||||
|
||||
| Model | Best For | Search Context |
|
||||
|-------|----------|----------------|
|
||||
| sonar | Quick answers, simple queries | Low/Medium/High |
|
||||
| sonar-pro | Complex queries, coding | Medium/High |
|
||||
| sonar-reasoning | Step-by-step reasoning | Medium/High |
|
||||
| sonar-deep-research | Comprehensive research | High |
|
||||
|
||||
## Files
|
||||
|
||||
- `scripts/query.py` - Main query interface
|
||||
- `config.json` - API key storage (auto-created)
|
||||
|
||||
## Privacy Note
|
||||
|
||||
Perplexity API sends queries to Perplexity's servers (not local). Use SearXNG for fully local search.
|
||||
6
skills/perplexity/config.json
Normal file
6
skills/perplexity/config.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
  "api_key": "REPLACE_WITH_YOUR_PERPLEXITY_API_KEY",
|
||||
"base_url": "https://api.perplexity.ai",
|
||||
"default_model": "sonar",
|
||||
"default_max_tokens": 1000
|
||||
}
|
||||
133
skills/perplexity/scripts/query.py
Executable file
133
skills/perplexity/scripts/query.py
Executable file
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Perplexity API Query Interface
|
||||
|
||||
Usage:
|
||||
python3 query.py "What is the capital of France?"
|
||||
python3 query.py "Latest AI news" --model sonar-pro --citations
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
|
||||
def load_config():
    """Read and parse the skill's API configuration from config.json.

    The config file lives one directory above this script's folder.
    Returns the parsed dict, or None (with a message on stderr) when the
    file is missing or unreadable.
    """
    config_file = Path(__file__).parent.parent / "config.json"
    try:
        return json.loads(config_file.read_text())
    except Exception as exc:
        print(f"Error loading config: {exc}", file=sys.stderr)
        return None
||||
|
||||
def query_perplexity(query, model=None, max_tokens=None, include_citations=False, search_context="low"):
    """
    Query the Perplexity chat-completions API.

    Args:
        query: The question/prompt to send.
        model: Model to use (sonar, sonar-pro, sonar-reasoning,
            sonar-deep-research). Defaults to the configured default_model.
        max_tokens: Maximum tokens in the response. Defaults to the
            configured default_max_tokens.
        include_citations: Whether to include source citations in the result.
        search_context: Search depth ("low", "medium", or "high").

    Returns:
        dict with "text", "model" and "usage" on success (plus "citations"
        and "search_results" when include_citations is True), or a dict
        with a single "error" key on failure.
    """
    # Explicit import: urllib.error is otherwise only in scope as a side
    # effect of importing urllib.request (a CPython implementation detail).
    import urllib.error

    config = load_config()
    if not config:
        return {"error": "Failed to load configuration"}

    model = model or config.get("default_model", "sonar")
    max_tokens = max_tokens or config.get("default_max_tokens", 1000)
    api_key = config.get("api_key")
    base_url = config.get("base_url", "https://api.perplexity.ai")

    if not api_key:
        return {"error": "API key not configured"}

    # OpenAI-compatible chat-completions payload.
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": "Be precise and concise."},
            {"role": "user", "content": query}
        ],
        "max_tokens": max_tokens,
        "search_context_size": search_context
    }

    req = urllib.request.Request(
        f"{base_url}/chat/completions",
        data=json.dumps(payload).encode(),
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
    )

    try:
        with urllib.request.urlopen(req, timeout=60) as response:
            result = json.loads(response.read().decode())

        output = {
            "text": result["choices"][0]["message"]["content"],
            "model": result.get("model"),
            "usage": result.get("usage", {})
        }

        if include_citations:
            output["citations"] = result.get("citations", [])
            output["search_results"] = result.get("search_results", [])

        return output

    except urllib.error.HTTPError as e:
        # Surface the HTTP status and the server's error body to the caller.
        error_body = e.read().decode()
        return {"error": f"HTTP {e.code}: {error_body}"}
    except Exception as e:
        return {"error": str(e)}
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, run the query, print the answer."""
    import argparse

    parser = argparse.ArgumentParser(description="Query Perplexity API")
    parser.add_argument("query", help="The query to send")
    parser.add_argument("--model", default="sonar",
                        choices=["sonar", "sonar-pro", "sonar-reasoning", "sonar-deep-research"],
                        help="Model to use")
    parser.add_argument("--max-tokens", type=int, default=1000,
                        help="Maximum tokens in response")
    parser.add_argument("--citations", action="store_true",
                        help="Include citations in output")
    parser.add_argument("--search-context", default="low",
                        choices=["low", "medium", "high"],
                        help="Search context size")
    args = parser.parse_args()

    result = query_perplexity(
        args.query,
        model=args.model,
        max_tokens=args.max_tokens,
        include_citations=args.citations,
        search_context=args.search_context,
    )

    # Any config/API failure aborts with a non-zero exit status.
    if "error" in result:
        print(f"Error: {result['error']}", file=sys.stderr)
        sys.exit(1)

    print(result["text"])

    # Show up to five sources when citations were requested and returned.
    if args.citations and result.get("citations"):
        print("\n--- Sources ---")
        for idx, source in enumerate(result["citations"][:5], 1):
            print(f"[{idx}] {source}")


if __name__ == "__main__":
    main()
|
||||
255
skills/perplexity/scripts/search.py
Executable file
255
skills/perplexity/scripts/search.py
Executable file
@@ -0,0 +1,255 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Unified Search - Perplexity primary, SearXNG fallback
|
||||
|
||||
Usage:
|
||||
search "your query" # Perplexity primary, SearXNG fallback
|
||||
search p "your query" # Perplexity only
|
||||
search perplexity "your query" # Perplexity only (alias)
|
||||
search local "your query" # SearXNG only
|
||||
search searxng "your query" # SearXNG only (alias)
|
||||
search --citations "query" # Include citations (Perplexity)
|
||||
search --model sonar-pro "query" # Use specific Perplexity model
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import urllib.request
|
||||
import urllib.parse
|
||||
from pathlib import Path
|
||||
|
||||
# Configuration
PERPLEXITY_CONFIG = Path(__file__).parent.parent / "config.json"
SEARXNG_URL = "http://10.0.0.8:8888"


def load_perplexity_config():
    """Read and parse the Perplexity API configuration.

    Returns the parsed dict, or None (with a message on stderr) when the
    file is missing or unreadable.
    """
    try:
        return json.loads(PERPLEXITY_CONFIG.read_text())
    except Exception as exc:
        print(f"Error loading Perplexity config: {exc}", file=sys.stderr)
        return None
||||
|
||||
def search_perplexity(query, model="sonar", max_tokens=1000, include_citations=False, search_context="low"):
    """
    Search using the Perplexity chat-completions API.

    Args:
        query: The search query / prompt to send.
        model: Perplexity model name (sonar, sonar-pro, sonar-reasoning,
            sonar-deep-research).
        max_tokens: Maximum tokens in the response.
        include_citations: Accepted for interface compatibility; the
            returned dict always carries "citations" and "search_results".
        search_context: Search depth ("low", "medium", or "high").

    Returns:
        dict with "source", "text", "model", "usage", "citations" and
        "search_results" on success; on failure a dict with "error" and,
        when SearXNG should be tried instead, "fallback_needed": True.
    """
    # Explicit import: urllib.error is otherwise only in scope as a side
    # effect of importing urllib.request (a CPython implementation detail).
    import urllib.error

    config = load_perplexity_config()
    if not config:
        return {"error": "Perplexity not configured", "fallback_needed": True}

    api_key = config.get("api_key")
    base_url = config.get("base_url", "https://api.perplexity.ai")

    if not api_key:
        return {"error": "Perplexity API key not set", "fallback_needed": True}

    # OpenAI-compatible chat-completions payload.
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": "Be precise and concise."},
            {"role": "user", "content": query}
        ],
        "max_tokens": max_tokens,
        "search_context_size": search_context
    }

    req = urllib.request.Request(
        f"{base_url}/chat/completions",
        data=json.dumps(payload).encode(),
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
    )

    try:
        with urllib.request.urlopen(req, timeout=60) as response:
            result = json.loads(response.read().decode())

        return {
            "source": "perplexity",
            "text": result["choices"][0]["message"]["content"],
            "model": result.get("model"),
            "usage": result.get("usage", {}),
            "citations": result.get("citations", []),
            "search_results": result.get("search_results", [])
        }

    except urllib.error.HTTPError as e:
        error_body = e.read().decode()
        if e.code == 429:  # Rate limit: worth falling back to local search
            return {"error": f"Perplexity rate limited: {error_body}", "fallback_needed": True}
        return {"error": f"Perplexity HTTP {e.code}: {error_body}", "fallback_needed": True}
    except Exception as e:
        return {"error": f"Perplexity error: {str(e)}", "fallback_needed": True}
|
||||
|
||||
def search_searxng(query, limit=10):
    """Search using the local SearXNG instance.

    Returns a dict with "source", a human-readable "text" rendering, the
    structured "results" (title/url/content, content clipped to 200 chars),
    and the original "query". On failure returns {"error": ...,
    "fallback_needed": False}.
    """
    try:
        url = f"{SEARXNG_URL}/search?q={urllib.parse.quote(query)}&format=json"
        with urllib.request.urlopen(urllib.request.Request(url), timeout=30) as resp:
            payload = json.loads(resp.read().decode())

        hits = []
        for item in payload.get("results", [])[:limit]:
            snippet = item.get("content", "")
            if len(snippet) > 200:
                snippet = snippet[:200] + "..."
            hits.append({
                "title": item.get("title", ""),
                "url": item.get("url", ""),
                "content": snippet,
            })

        # Render the hits as a readable numbered list.
        parts = [f"Search results for: {query}\n\n"]
        for idx, hit in enumerate(hits, 1):
            parts.append(f"[{idx}] {hit['title']}\n{hit['url']}\n{hit['content']}\n\n")

        return {
            "source": "searxng",
            "text": "".join(parts).strip(),
            "results": hits,
            "query": query
        }

    except Exception as exc:
        return {"error": f"SearXNG error: {str(exc)}", "fallback_needed": False}
|
||||
|
||||
def unified_search(query, mode="default", model="sonar", include_citations=False, max_tokens=1000, search_context="low"):
    """
    Unified search with Perplexity primary and SearXNG fallback.

    Modes:
        default: Perplexity primary, SearXNG fallback
        perplexity / p: Perplexity only
        local / searxng / s: SearXNG only
    """
    # Single-backend modes: no fallback handling.
    if mode in ("perplexity", "p"):
        return search_perplexity(query, model, max_tokens, include_citations, search_context)

    if mode in ("local", "searxng", "s"):
        return search_searxng(query)

    # Default mode: try Perplexity first, fall back to SearXNG on failure.
    primary = search_perplexity(query, model, max_tokens, include_citations, search_context)
    if not (primary.get("fallback_needed") or primary.get("error")):
        return primary

    print(f"⚠️ Perplexity failed: {primary.get('error', 'Unknown error')}", file=sys.stderr)
    print("🔄 Falling back to SearXNG...\n", file=sys.stderr)

    fallback = search_searxng(query)
    if fallback.get("error"):
        return {"error": f"Both Perplexity and SearXNG failed. Perplexity: {primary.get('error')}, SearXNG: {fallback.get('error')}"}
    return fallback
||||
|
||||
def main():
    """CLI entry point for the unified search tool."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Unified search: Perplexity primary, SearXNG fallback",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  search "latest AI news"                  # Perplexity primary, SearXNG fallback
  search p "quantum computing explained"   # Perplexity only
  search local "ip address lookup"         # SearXNG only
  search --citations "who invented Python" # Include citations
  search --model sonar-pro "coding help"   # Use Pro model
"""
    )

    parser.add_argument("args", nargs="*", help="[mode] query (mode: p/perplexity/local/searxng)")
    parser.add_argument("--citations", action="store_true",
                        help="Include citations (Perplexity only)")
    parser.add_argument("--model", default="sonar",
                        choices=["sonar", "sonar-pro", "sonar-reasoning", "sonar-deep-research"],
                        help="Perplexity model to use")
    parser.add_argument("--max-tokens", type=int, default=1000,
                        help="Maximum tokens in response (Perplexity)")
    parser.add_argument("--search-context", default="low",
                        choices=["low", "medium", "high"],
                        help="Search context size (Perplexity)")
    args = parser.parse_args()

    if not args.args:
        print("Error: No query provided", file=sys.stderr)
        parser.print_help()
        sys.exit(1)

    # The first positional token may name a search mode; everything after
    # it (or everything, when no mode is given) forms the query.
    first = args.args[0]
    if first in ("p", "perplexity", "local", "searxng", "s"):
        mode = {"p": "perplexity", "s": "searxng"}.get(first, first)
        query_parts = args.args[1:]
    else:
        mode = "default"
        query_parts = args.args

    query = " ".join(query_parts)
    if not query:
        print("Error: No query provided", file=sys.stderr)
        parser.print_help()
        sys.exit(1)

    result = unified_search(
        query,
        mode=mode,
        model=args.model,
        include_citations=args.citations,
        max_tokens=args.max_tokens,
        search_context=args.search_context,
    )

    if "error" in result:
        print(f"Error: {result['error']}", file=sys.stderr)
        sys.exit(1)

    # Render the result, with a header identifying the backend that served it.
    source = result.get("source")
    if source == "perplexity":
        print(f"🔍 Perplexity ({result.get('model', 'unknown')})")
        if result.get("usage"):
            total = result["usage"].get("cost", {}).get("total_cost", "unknown")
            print(f"💰 Cost: ${total}")
        print()
        print(result["text"])

        if args.citations and result.get("citations"):
            print("\n--- Sources ---")
            for idx, citation in enumerate(result["citations"][:5], 1):
                print(f"[{idx}] {citation}")

    elif source == "searxng":
        print("🔍 SearXNG (local)")
        print()
        print(result["text"])

    else:
        print(result.get("text", "No results"))


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user