liang-spring-ai/src/main/resources/application.yml

spring:
  application:
    name: spring-ai-demo
  elasticsearch:
    uris: http://154.12.80.119:9200
    username: elastic
    password: 123456
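  # Connection for Spring Boot's auto-configured Elasticsearch client;
  # the vector store below points at the same cluster.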
  ai:
    deepseek:
      api-key: sk-3043bb4777404970a22c7544dd30aaa2
    dashscope:
      api-key: sk-2f703a41fff0488e9b6888013d2ee58a
      chat:
        options:
          model: qwen-plus
    zhipuai:
      api-key: 73f440ddeafc47ba94ed66e35fbd63d7.VmlulRZ4BMWexncF
    openai:
      api-key: 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
      # If you use a proxy service, change this to the proxy address
      # base-url: https://your-proxy-service.com/v1
      base-url: https://api.openai.com
      chat:
        options:
          model: gpt-3.5-turbo
          temperature: 0.7
      # Timeout settings
      client:
        connect-timeout: 30000 # 30-second connect timeout
        read-timeout: 60000    # 60-second read timeout
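      # The api-key values above can also be pulled from the environment via
      # property placeholders (same pattern as the ollama block below), e.g.:
      # api-key: ${OPENAI_API_KEY}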
    vectorstore:
      elasticsearch:
        initialize-schema: false
        index-name: custom-index
        dimensions: 1536
        similarity: cosine
        uris: http://154.12.80.119:9200
        username: elastic
        password: 123456
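        # Note: 'dimensions' must match the embedding model's vector size;
        # 1536 corresponds to OpenAI's text-embedding-ada-002 / text-embedding-3-small.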
    # ollama:
    #   base-url: ${OLLAMA_BASE_URL:http://localhost:11434}
    #   chat:
    #     options:
    #       model: ${OLLAMA_MODEL:llama2}
    mcp:
      server:
        name: james-hero-mcp-server
        version: 1.0.0
        type: sync
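        # 'sync' selects the synchronous MCP server; an async variant is also
        # available in Spring AI's MCP server starter.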

server:
  port: 8009

logging:
  level:
    org.springframework.ai: DEBUG