# Example environment configuration for https://github.com/we-promise/sure
# To enable / disable self-hosting features.
SELF_HOSTED=true

# Custom port config
# For users who have other applications listening at 3000, this allows them to set a value puma will listen to.
PORT=3000

# SimpleFIN runtime flags (default-off)
# Accepted truthy values: 1, true, yes, on
# SIMPLEFIN_DEBUG_RAW: when truthy, logs the raw payload returned by SimpleFIN (debug-only; can be noisy)
SIMPLEFIN_DEBUG_RAW=false
# SIMPLEFIN_INCLUDE_PENDING: when truthy, forces `pending=1` on SimpleFIN fetches when the caller doesn't specify `pending:`
SIMPLEFIN_INCLUDE_PENDING=false

# Lunchflow runtime flags (default-off)
# LUNCHFLOW_DEBUG_RAW: when truthy, logs the raw payload returned by Lunchflow (debug-only; can be noisy)
LUNCHFLOW_DEBUG_RAW=false
# LUNCHFLOW_INCLUDE_PENDING: when truthy, adds `include_pending=true` to Lunchflow transaction fetch requests
LUNCHFLOW_INCLUDE_PENDING=false

# Controls onboarding flow (valid: open, closed, invite_only)
ONBOARDING_STATE=open

# Enable Twelve Data market data (careful, this will use your API credits)
TWELVE_DATA_API_KEY=

# OpenAI-compatible API endpoint config
OPENAI_ACCESS_TOKEN=
OPENAI_URI_BASE=
OPENAI_MODEL=

# LLM token budget. Applies to ALL outbound LLM calls: chat history,
# auto-categorize, merchant detection, provider enhancer, PDF processing.
# Defaults to Ollama's historical 2048-token baseline so small local models
# work out of the box — raise explicitly for cloud or larger-context models.
# LLM_CONTEXT_WINDOW=2048         # Total tokens the model will accept
# LLM_MAX_RESPONSE_TOKENS=512     # Reserved for the model's reply
# LLM_MAX_HISTORY_TOKENS=         # Derived if unset (context - response - system_reserve)
# LLM_SYSTEM_PROMPT_RESERVE=256   # Tokens reserved for the system prompt
# LLM_MAX_ITEMS_PER_CALL=25       # Upper bound on auto-categorize / merchant batches

# OpenAI-compatible capability flags (custom/self-hosted providers)
# OPENAI_REQUEST_TIMEOUT=60                 # HTTP timeout in seconds; raise for slow local models
# OPENAI_SUPPORTS_PDF_PROCESSING=true       # Set to false for endpoints without vision support
# OPENAI_SUPPORTS_RESPONSES_ENDPOINT=       # true to force Responses API on custom providers
# LLM_JSON_MODE=                            # auto | strict | json_object | none

# Example (LM Studio / Docker) OpenAI-compatible API endpoint config:
# OPENAI_URI_BASE=http://host.docker.internal:1234/
# OPENAI_MODEL=qwen/qwen3-vl-4b

# OpenID Connect for development
OIDC_CLIENT_ID=
OIDC_CLIENT_SECRET=
OIDC_ISSUER=
OIDC_REDIRECT_URI=http://localhost:3000/auth/openid_connect/callback

# Langfuse config
LANGFUSE_PUBLIC_KEY=
LANGFUSE_SECRET_KEY=
LANGFUSE_HOST=https://cloud.langfuse.com

# Set to `true` to get error messages rendered in the /chats UI
AI_DEBUG_MODE=

# =============================================================================
# SSL/TLS Configuration for Self-Signed Certificates
# =============================================================================
# Use these settings when connecting to services with self-signed or internal
# CA certificates (e.g., self-hosted Keycloak, Authentik, or AI endpoints).
#
# SSL_CA_FILE: Path to custom CA certificate file (PEM format)
#   - The certificate that signed your server's SSL certificate
#   - Must be readable by the application
#   - Will be validated at startup
# SSL_CA_FILE=/certs/my-ca.crt
#
# SSL_VERIFY: Enable/disable SSL certificate verification
#   - Default: true (verification enabled)
#   - Set to "false" ONLY for development/testing
#   - WARNING: Disabling removes protection against man-in-the-middle attacks
# SSL_VERIFY=true
#
# SSL_DEBUG: Enable verbose SSL logging for troubleshooting
#   - Default: false
#   - When enabled, logs detailed SSL connection information
#   - Useful for diagnosing certificate issues
# SSL_DEBUG=false
#
# Example docker-compose.yml configuration:
#   services:
#     app:
#       environment:
#         SSL_CA_FILE: /certs/my-ca.crt
#         SSL_DEBUG: "true"
#       volumes:
#         - ./my-ca.crt:/certs/my-ca.crt:ro

# Active Storage Configuration
# ACTIVE_STORAGE_SERVICE=google
# GCS_PROJECT=
# GCS_BUCKET=
# GCS_KEYFILE_JSON=
# GCS_KEYFILE=