package com.ai.config;

import com.ai.service.Assist;
import dev.langchain4j.community.model.dashscope.QwenChatModel;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import lombok.RequiredArgsConstructor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.List;

@Configuration
@RequiredArgsConstructor
public class AssistantInit {

    // Loads all files in the given directory; plain .txt files seem to ingest fastest.
    @Bean
    public Assist init() {
        // Chat model backed by the DashScope-compatible endpoint.
        // TODO: read the API key from configuration (application.yml / environment variable) instead of hard-coding it.
        ChatLanguageModel qwenModel = QwenChatModel.builder()
                .apiKey("sk-2f703a41fff0488e9b6888013d2ee58a")
                .modelName("deepseek-v3")
                .build();

        // Load every document under the project directory for retrieval-augmented generation.
        List<Document> documents = FileSystemDocumentLoader.loadDocuments("E:\\ideaProject\\liang-ai");

        // For simplicity, we use an in-memory embedding store and ingest the documents into it.
        InMemoryEmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        EmbeddingStoreIngestor.ingest(documents, embeddingStore);

        // Assemble the AI service: per-conversation memory (last 10 messages) plus RAG content retrieval.
        return AiServices.builder(Assist.class)
                .chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(10))
                .chatLanguageModel(qwenModel)
                .contentRetriever(EmbeddingStoreContentRetriever.from(embeddingStore))
                .build();
    }
}
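
/*
 * For reference, a minimal sketch of what the Assist AI-service interface wired above could look
 * like. This is an assumption: com.ai.service.Assist is not shown in this file, and the method
 * name, system prompt, and @MemoryId/@UserMessage parameters below are illustrative only. They
 * match the configuration above in that a chatMemoryProvider requires a memory-id parameter.
 *
 * package com.ai.service;
 *
 * import dev.langchain4j.service.MemoryId;
 * import dev.langchain4j.service.SystemMessage;
 * import dev.langchain4j.service.UserMessage;
 *
 * public interface Assist {
 *
 *     // Hypothetical chat method: memoryId selects the per-conversation window,
 *     // userMessage is the question answered with help of the retrieved documents.
 *     @SystemMessage("You are a helpful assistant. Use the retrieved documents when relevant.")
 *     String chat(@MemoryId String memoryId, @UserMessage String userMessage);
 * }
 */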