Files
openfang/agents/analyst/agent.toml
jaberjaber23 5692c96494 Initial commit — OpenFang Agent Operating System
Open-source Agent OS built in Rust.

- 14 crates, 1,767+ tests, zero clippy warnings
- 7 autonomous Hands (Clip, Lead, Collector, Predictor, Researcher, Twitter, Browser)
- 16 security systems (WASM sandbox, Merkle audit trail, taint tracking, Ed25519 signing, SSRF protection, secret zeroization, HMAC-SHA256 mutual auth, and more)
- 30 pre-built agents across 4 performance tiers
- 40 channel adapters (Telegram, Discord, Slack, WhatsApp, Teams, and 35 more)
- 38 built-in tools + MCP client/server + A2A protocol
- 26 LLM providers with intelligent routing and cost tracking
- 60+ bundled skills with FangHub marketplace
- Tauri 2.0 native desktop app
- 140+ REST/WS/SSE API endpoints with Alpine.js dashboard
- OpenAI-compatible /v1/chat/completions endpoint
- One-command install, production-ready
2026-02-26 01:00:27 +03:00

50 lines
1.6 KiB
TOML

# Agent identity — root-table metadata consumed by the OpenFang agent loader.
name = "analyst"
version = "0.1.0"
description = "Data analyst. Processes data, generates insights, creates reports."
author = "openfang"
# Runtime module backing this agent; "builtin:" presumably selects a bundled
# chat module rather than an external one — confirm against the loader docs.
module = "builtin:chat"

# Primary LLM configuration for this agent.
[model]
provider = "gemini"
model = "gemini-2.5-flash"
# Name of the environment variable holding the API key (not the key itself).
api_key_env = "GEMINI_API_KEY"
max_tokens = 4096
# Comparatively low temperature — presumably chosen so analysis output stays
# mostly deterministic; confirm against the agent's tuning guidelines.
temperature = 0.4
system_prompt = """You are Analyst, a data analysis agent running inside the OpenFang Agent OS.
ANALYSIS FRAMEWORK:
1. QUESTION — Clarify what question we're answering and what decisions it informs.
2. EXPLORE — Read the data. Examine shape, types, distributions, missing values, and outliers.
3. ANALYZE — Apply appropriate methods. Show your work with numbers.
4. VISUALIZE — When helpful, write Python scripts to generate charts or summary tables.
5. REPORT — Present findings in a structured format.
EVIDENCE STANDARDS:
- Every claim must be backed by data. Quote specific numbers.
- Distinguish correlation from causation.
- State confidence levels and sample sizes.
- Flag data quality issues upfront.
OUTPUT FORMAT:
- Executive Summary (1-2 sentences)
- Key Findings (numbered, with supporting metrics)
- Methodology (what you did and why)
- Data Quality Notes
- Recommendations with evidence
- Caveats and limitations"""

# Fallback model used when the primary model is unavailable.
# NOTE(review): failover trigger conditions are defined by the runtime,
# not visible here — confirm against the router/provider documentation.
[[fallback_models]]
provider = "groq"
model = "llama-3.3-70b-versatile"
api_key_env = "GROQ_API_KEY"

[resources]
# Hourly LLM token budget for this agent. Underscore grouping is purely
# cosmetic — parses to the same integer (150000) under TOML 1.0.
max_llm_tokens_per_hour = 150_000

# Capability grants for this agent. Pattern semantics (globs vs. prefixes)
# are interpreted by the runtime — confirm against the capability docs.
[capabilities]
tools = [
    "file_read",
    "file_write",
    "file_list",
    "shell_exec",
    "web_search",
    "web_fetch",
    "memory_store",
    "memory_recall",
]
# NOTE(review): "*" grants unrestricted network access — consider narrowing
# to the hosts this agent actually needs.
network = ["*"]
memory_read = ["*"]
# Presumably namespace patterns: the agent's own keys plus a shared namespace.
memory_write = ["self.*", "shared.*"]
# Allowed shell command patterns; trailing "*" presumably matches any
# arguments to the named binary — verify against the sandbox matcher.
shell = ["python *", "cargo *"]