
added structured log to health endpoint

galo 3 months ago
parent
commit
19c5e3f584
4 changed files with 35 additions and 30 deletions
  1. README.md (+4 -0)
  2. app/api/chat.py (+11 -1)
  3. app/schemas/chat.py (+0 -1)
  4. app/services/qa.py (+20 -28)

+ 4 - 0
README.md

@@ -27,4 +27,8 @@ curl -X POST http://localhost:8000/chat/?session_id=test_session -H "Content-Typ
 or
 
 curl -X POST http://localhost:8000/ask/ -F "session_id=test_session" -F "question=What is the capital of France?" -F "file=@E:/test.txt"
+
+or
+
+curl http://localhost:8000/health
 ```
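
For completeness, the same smoke test can be run from Python rather than curl; a minimal sketch, assuming the API is running locally on port 8000 as in the examples above:

```python
# Smoke test for the new /health endpoint; assumes the server is
# running locally on port 8000, as in the curl examples above.
import requests

body = requests.get("http://localhost:8000/health", timeout=5).json()
assert body["status"] == "healthy"
print(body["timestamp"], body["message"])
```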

+ 11 - 1
app/api/chat.py

@@ -2,6 +2,7 @@ from fastapi import APIRouter, HTTPException, UploadFile, File, Form
 from app.services.qa import get_answer, ask_rag
 from app.schemas.chat import ChatRequest, AskRequest, ChatResponse, AskResponse
 import time
+from datetime import datetime, timezone
 
 router = APIRouter()
 
@@ -37,4 +38,13 @@ async def ask(session_id: str = Form("default_session"), question: str = Form(..
 
 @router.get("/health")
 async def health():
-    return {"status": "healthy"}
+    current_time = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")  # UTC with 'Z' suffix; utcnow() is deprecated
+    log_entry = {
+        "timestamp": current_time,
+        "level": "INFO",
+        "message": "Health check successful",
+        "status": "healthy",
+        "service": "chat-api",
+        "version": "1.0.0",  # Example version, adjust as needed
+    }
+    return log_entry
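
A quick way to pin down the new payload shape is a test against the route. The sketch below assumes the app object lives in a hypothetical app.main module and that the router is mounted at the root, as the README's curl examples suggest:

```python
from fastapi.testclient import TestClient

from app.main import app  # hypothetical module; adjust to where the app is created

client = TestClient(app)

def test_health_returns_structured_log():
    body = client.get("/health").json()
    # Fields built by the handler above
    for key in ("timestamp", "level", "message", "status", "service", "version"):
        assert key in body
    assert body["status"] == "healthy"
    assert body["timestamp"].endswith("Z")  # UTC timestamp with 'Z' suffix
```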

+ 0 - 1
app/schemas/chat.py

@@ -10,7 +10,6 @@ class ChatResponse(BaseModel):
     answer: str
     latency_ms: int
 
-# Optional: Define AskResponse for consistency (though not currently used)
 class AskResponse(BaseModel):
     answer: str
     sources: list[str]
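
The health payload is returned as a plain dict rather than a response model. For parity with ChatResponse and AskResponse, a hypothetical HealthResponse schema (not part of this commit) could mirror the fields built in app/api/chat.py:

```python
from pydantic import BaseModel

# Hypothetical schema mirroring the dict returned by the /health
# handler in app/api/chat.py; not part of this commit.
class HealthResponse(BaseModel):
    timestamp: str
    level: str
    message: str
    status: str
    service: str
    version: str
```

The route could then declare response_model=HealthResponse so the fields are validated and documented in the OpenAPI schema.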

+ 20 - 28
app/services/qa.py

@@ -5,6 +5,7 @@ from langchain_community.vectorstores import Chroma
 from langchain_community.embeddings import OllamaEmbeddings
 from langchain_community.document_loaders import TextLoader
 from langchain_text_splitters import CharacterTextSplitter
+from langchain.memory import ConversationBufferWindowMemory
 import os
 from dotenv import load_dotenv
 from tempfile import NamedTemporaryFile
@@ -12,9 +13,6 @@ from tempfile import NamedTemporaryFile
 # Load environment variables
 load_dotenv()
 
-# Global dictionary for chat history
-chat_history = {}
-
 # Initialize Ollama LLM and Embeddings
 llm = Ollama(model="tinyllama", temperature=0.7)
 embeddings = OllamaEmbeddings(model="tinyllama")
@@ -42,59 +40,53 @@ def index_file(file_content: bytes, file_name: str):
     os.unlink(temp_file_path)
 
 # Define prompt templates
-def get_prompt_with_history(session_id):
-    history = chat_history.get(session_id, [])
-    history_text = "\n".join([f"User: {msg['question']}\nAI: {msg['answer']}" for msg in history]) if history else "No previous conversation."
+def get_prompt_with_history(memory):
     return PromptTemplate(
-        input_variables=["question"],
-        template=f"Previous conversation:\n{history_text}\n\nResponda à seguinte pergunta: {{question}}"
+        input_variables=["history", "question"],
+        template=f"Previous conversation:\n{{history}}\n\nResponda à seguinte pergunta: {{question}}"
     )
 
-def get_prompt_with_history_and_docs(session_id, docs):
-    history = chat_history.get(session_id, [])
-    history_text = "\n".join([f"User: {msg['question']}\nAI: {msg['answer']}" for msg in history]) if history else "No previous conversation."
+def get_prompt_with_history_and_docs(memory, docs):
     docs_text = "\n".join([f"Source: {doc.page_content}" for doc in docs]) if docs else "No relevant documents found."
     return PromptTemplate(
-        input_variables=["question"],
-        template=f"Previous conversation:\n{history_text}\n\nRelevant documents:\n{docs_text}\n\nResponda à seguinte pergunta usando as fontes relevantes e citando trechos como fontes: {{question}}"
+        input_variables=["history", "question"],
+        template=f"Previous conversation:\n{{history}}\n\nRelevant documents:\n{docs_text}\n\nResponda à seguinte pergunta usando as fontes relevantes e citando trechos como fontes: {{question}}"
     )
 
 def get_answer(session_id: str, question: str) -> str:
-    # Get or initialize chat history for this session
-    if session_id not in chat_history:
-        chat_history[session_id] = []
+    # Memory for this request; ConversationBufferWindowMemory has no session_id parameter (see sketch below)
+    memory = ConversationBufferWindowMemory(memory_key="history", input_key="question", k=3)
     
     # Create chain with dynamic prompt including history
-    prompt = get_prompt_with_history(session_id)
-    chain = LLMChain(llm=llm, prompt=prompt)
+    prompt = get_prompt_with_history(memory)
+    chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
     
     # Get response
     response = chain.run(question=question)
     response = response[:100] if len(response) > 100 else response  # Truncate if needed
     
-    # Store the interaction in history
-    chat_history[session_id].append({"question": question, "answer": response})
-    
     return response
 
 # RAG function for /ask endpoint
 def ask_rag(session_id: str, question: str, file_content: bytes = None, file_name: str = None) -> dict:
+    # Memory for this request; ConversationBufferWindowMemory has no session_id parameter (see sketch below)
+    memory = ConversationBufferWindowMemory(memory_key="history", input_key="question", k=3)
+    
     if file_content and file_name:
         index_file(file_content, file_name)
     
-    if session_id not in chat_history:
-        chat_history[session_id] = []
-    
+    # Retrieve relevant documents
     docs = vector_store.similarity_search(question, k=3)
     
-    prompt = get_prompt_with_history_and_docs(session_id, docs)
-    chain = LLMChain(llm=llm, prompt=prompt)
+    # Create chain with dynamic prompt including history and docs
+    prompt = get_prompt_with_history_and_docs(memory, docs)
+    chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
     
+    # Get response
     response = chain.run(question=question)
     response = response[:100] if len(response) > 100 else response
     
-    chat_history[session_id].append({"question": question, "answer": response})
-    
+    # Prepare sources
     sources = [doc.page_content for doc in docs]
     
     return {"answer": response, "sources": sources}