qa.py

from langchain_community.llms import Ollama
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Initialize the Ollama LLM (assumes a local Ollama server with the mistral model pulled)
llm = Ollama(
    model="mistral",
    temperature=0.7
)

# Define a simple prompt template
prompt = PromptTemplate(
    input_variables=["question"],
    template="Answer the following question: {question}"
)

# Create the LLM chain
chain = LLMChain(llm=llm, prompt=prompt)

def get_answer(question: str) -> str:
    return chain.run(question=question)

if __name__ == "__main__":
    print(get_answer("What is the capital of France?"))
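
To run this script, the Ollama server must be running locally (by default on http://localhost:11434) and the model pulled beforehand, e.g. with `ollama pull mistral`.

Note that in recent LangChain releases, `LLMChain` and the community `Ollama` class are deprecated in favor of the runnable pipe syntax and the `langchain-ollama` package. A minimal sketch of the equivalent, assuming `langchain-ollama` is installed:

from langchain_core.prompts import PromptTemplate
from langchain_ollama import OllamaLLM  # pip install langchain-ollama

llm = OllamaLLM(model="mistral", temperature=0.7)

prompt = PromptTemplate.from_template("Answer the following question: {question}")

# The | operator composes the prompt and the LLM into a single runnable chain
chain = prompt | llm

if __name__ == "__main__":
    print(chain.invoke({"question": "What is the capital of France?"}))

The behavior is the same; `chain.invoke(...)` replaces the deprecated `chain.run(...)` call.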