# Ollama API
ollama-api-url: "http://localhost:11434/api/generate" # Ollama endpoint used for generation requests
model: "llama3" # Ollama model used for chat responses
ollama-enabled: true # Whether the Ollama integration is enabled
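# Quick sanity check for the endpoint above (a sketch, assuming a local Ollama install with llama3 pulled):
#   curl http://localhost:11434/api/generate -d '{"model": "llama3", "prompt": "Hello", "stream": false}'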
# Streaming settings
stream-settings:
  enabled: true # Whether to enable streaming for AI responses
# Chat
trigger-prefixes:
  - "@Bot"
  - "@ai"
# Length
max-response-length: 500 # Maximum length of AI responses in characters
# History
max-history: 5 # Maximum number of chat history entries to retain per conversation
# Language Settings
language: "en_us" # Language file to use (e.g., en_us.json)
# Progress Display Settings
progress-display:
  enabled: true # Whether to enable progress display
  type: "bossbar" # Display type (bossbar or actionbar)
  color: "BLUE" # BossBar color (BLUE, GREEN, RED, etc.)
  style: "SOLID" # BossBar style (SOLID, SEGMENTED_6, etc.)
  update-interval: 1 # Progress update frequency (in seconds)
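  # Assuming the standard Bukkit boss bar enums are used, valid colors are PINK, BLUE, RED,
  # GREEN, YELLOW, PURPLE, WHITE and valid styles are SOLID, SEGMENTED_6, SEGMENTED_10,
  # SEGMENTED_12, SEGMENTED_20.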
# Suggested Response
suggested-responses-enabled: false # Whether to enable suggested responses
suggested-response-models: # AI models used for generating suggested responses
  - "llama3"
suggested-response-count: 3 # Number of suggested responses to generate
suggested-response-prompt: "Conversation:\nUser: {prompt}\nAI: {response}\n\nBased on the above conversation, suggest {count} natural follow-up responses the user might want to say. They should be conversational in tone rather than questions. List them as:\n1. Response 1\n2. Response 2\n3. Response 3"
suggested-response-presets:
  - "I see what you mean."
  - "That's interesting!"
  - "Tell me more about that."
suggested-response-model-toggles:
  llama3: true # Toggle for each model listed in suggested-response-models
suggested-response-cooldown: 10 # Cooldown between suggested responses (in seconds)
suggested-response-presets-enabled: false # Whether to enable preset suggested responses
# Database
database:
  type: sqlite # Database type (sqlite or mysql)
  mysql:
    host: localhost
    port: 3306
    database: ollamachat
    username: root
    password: ""
    hikari: # HikariCP connection pool settings for MySQL
      maximum-pool-size: 10 # Maximum number of connections in the pool
      minimum-idle: 2 # Minimum number of idle connections
      connection-timeout: 30000 # Connection timeout in milliseconds
      idle-timeout: 600000 # Idle connection timeout in milliseconds
      max-lifetime: 1800000 # Maximum lifetime of a connection in milliseconds
      cache-prep-stmts: true # Cache prepared statements
      prep-stmt-cache-size: 250 # Prepared statement cache size
      prep-stmt-cache-sql-limit: 2048 # SQL limit for prepared statement cache
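  # To switch from SQLite to MySQL, set type: mysql and fill in the mysql credentials above;
  # the hikari pool settings only apply when MySQL is used.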
# Default prompt to prepend to user inputs (empty for none)
default-prompt: ""
# Custom prompts
prompts:
  # Example:
  # friendly: "You are a friendly assistant who responds in a cheerful tone."
  # formal: "You are a professional assistant who responds formally."
# Other AI Configurations
other-ai-configs:
  openai:
    api-url: "https://api.openai.com/v1/chat/completions" # OpenAI chat completions endpoint
    api-key: "your-openai-api-key" # API key for this service
    model: "gpt-4" # Model name to request
    enabled: false # Whether this API is enabled
    messages-format: true # Whether requests use the chat-style "messages" array format
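  # Example of another OpenAI-compatible backend, assuming additional entries can sit
  # alongside openai with the same key structure; the name, URL and model below are
  # placeholders, not values shipped with the plugin:
  # local-openai:
  #   api-url: "http://localhost:8080/v1/chat/completions"
  #   api-key: ""
  #   model: "my-local-model"
  #   enabled: false
  #   messages-format: true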