mirror of
https://github.com/RightNow-AI/openfang.git
synced 2026-04-25 17:25:11 +02:00
Open-source Agent OS built in Rust. - 14 crates, 1,767+ tests, zero clippy warnings - 7 autonomous Hands (Clip, Lead, Collector, Predictor, Researcher, Twitter, Browser) - 16 security systems (WASM sandbox, Merkle audit trail, taint tracking, Ed25519 signing, SSRF protection, secret zeroization, HMAC-SHA256 mutual auth, and more) - 30 pre-built agents across 4 performance tiers - 40 channel adapters (Telegram, Discord, Slack, WhatsApp, Teams, and 35 more) - 38 built-in tools + MCP client/server + A2A protocol - 26 LLM providers with intelligent routing and cost tracking - 60+ bundled skills with FangHub marketplace - Tauri 2.0 native desktop app - 140+ REST/WS/SSE API endpoints with Alpine.js dashboard - OpenAI-compatible /v1/chat/completions endpoint - One-command install, production-ready
49 lines
1.8 KiB
Plaintext
# OpenFang Agent OS — Example Configuration
# Copy to ~/.openfang/config.toml and customize.

# API server settings
# api_key = "" # Set to enable Bearer auth (recommended)
# listen_addr = "127.0.0.1:3000" # HTTP API bind address

[default_model]
provider = "anthropic" # "anthropic", "gemini", "openai", "groq", "ollama", etc.
model = "claude-sonnet-4-20250514" # Model identifier
api_key_env = "ANTHROPIC_API_KEY" # Environment variable holding API key
# base_url = "https://api.anthropic.com" # Optional: override API endpoint

[memory]
decay_rate = 0.05 # Memory confidence decay rate
# sqlite_path = "~/.openfang/data/openfang.db" # Optional: custom DB path

[network]
listen_addr = "127.0.0.1:4200" # OFP listen address
# shared_secret = "" # Required for P2P authentication

# Session compaction (LLM-based context management)
# [compaction]
# threshold = 80 # Compact when messages exceed this count
# keep_recent = 20 # Keep this many recent messages after compaction
# max_summary_tokens = 1024 # Max tokens for LLM summary

# Usage tracking display
# usage_footer = "Full" # "Off", "Tokens", "Cost", or "Full"

# Channel adapters (configure tokens via environment variables)
# [telegram]
# bot_token_env = "TELEGRAM_BOT_TOKEN"
# allowed_users = [] # Empty = allow all

# [discord]
# bot_token_env = "DISCORD_BOT_TOKEN"
# guild_ids = [] # Empty = all guilds

# [slack]
# bot_token_env = "SLACK_BOT_TOKEN"
# app_token_env = "SLACK_APP_TOKEN"

# MCP server connections
# [[mcp_servers]]
# name = "filesystem"
# command = "npx"
# args = ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]