# Environment variable template — copy to .env and fill in the values below.
# API Keys (Required to enable respective provider)
ANTHROPIC_API_KEY="your_anthropic_api_key_here" # Required: Format: sk-ant-api03-...
PERPLEXITY_API_KEY="your_perplexity_api_key_here" # Optional: Format: pplx-...
OPENAI_API_KEY="your_openai_api_key_here" # Required for AI features. Format: sk-proj-...
GOOGLE_API_KEY="your_google_api_key_here" # Optional, for Google Gemini models.
MISTRAL_API_KEY="your_mistral_key_here" # Optional, for Mistral AI models.
XAI_API_KEY="YOUR_XAI_KEY_HERE" # Optional, for xAI models.
GROQ_API_KEY="YOUR_GROQ_KEY_HERE" # Optional, for Groq models.
OPENROUTER_API_KEY="YOUR_OPENROUTER_KEY_HERE" # Optional, for OpenRouter models.
AZURE_OPENAI_API_KEY="your_azure_key_here" # Optional, for Azure OpenAI models (requires endpoint in .taskmaster/config.json).
OLLAMA_API_KEY="your_ollama_api_key_here" # Optional: For remote Ollama servers that require authentication.
GITHUB_API_KEY="your_github_api_key_here" # Optional: For GitHub import/export features. Format: ghp_... or github_pat_...

# OpenAI Configuration
OPENAI_MODEL="gpt-4-turbo" # Model to use (gpt-4-turbo, gpt-4, gpt-3.5-turbo)
OPENAI_MAX_TOKENS="4096" # Maximum tokens per request
OPENAI_TEMPERATURE="0.7" # Temperature for responses (0-2)
OPENAI_RETRY_ATTEMPTS="3" # Number of retry attempts on failure
OPENAI_RETRY_DELAY_MS="1000" # Delay between retries in milliseconds
OPENAI_TIMEOUT="30000" # Request timeout in milliseconds

|
# LangChain Configuration
LANGCHAIN_API_KEY="" # Optional: For LangChain-specific features
LANGCHAIN_VERBOSE="false" # Enable verbose logging
LANGCHAIN_CACHE_ENABLED="true" # Enable response caching
LANGCHAIN_CACHE_TTL="3600" # Cache TTL in seconds (1 hour)
LANGCHAIN_MAX_CONCURRENCY="5" # Maximum concurrent operations
LANGCHAIN_MEMORY_BUFFER_SIZE="10" # Number of messages to keep in memory

|
# AI Rate Limiting
AI_MAX_REQUESTS_PER_MINUTE="60" # Maximum API requests per minute
AI_MAX_TOKENS_PER_MINUTE="90000" # Maximum tokens per minute
AI_MAX_REQUESTS_PER_DAY="10000" # Maximum requests per day

|
# AI Monitoring
AI_TRACK_TOKEN_USAGE="true" # Track token usage for cost monitoring
AI_LOG_LEVEL="info" # Log level (debug, info, warn, error)
AI_METRICS_ENABLED="true" # Enable metrics collection

|
# NLP Configuration
NLP_ENABLE_CACHING="true" # Enable NLP result caching
NLP_CACHE_MAX_AGE="3600000" # Cache max age in ms (1 hour)
NLP_MAX_TOKENS="2000" # Max tokens per NLP request
NLP_DEFAULT_LANGUAGE="en" # Default language code
NLP_SUPPORTED_LANGUAGES="en,es,fr,de,pt,it,zh,ja" # Comma-separated language codes
NLP_CONFIDENCE_THRESHOLD="0.7" # Minimum confidence threshold (0-1)

|
# Multi-language Support
NLP_AUTO_TRANSLATE="true" # Auto-translate non-English input
NLP_TRANSLATION_CACHE_SIZE="1000" # Max cached translations
NLP_LANGUAGE_DETECTION_THRESHOLD="0.7" # Min confidence for language detection