# Standard library
import os  # NOTE(review): unused in the visible code — verify before removing

# Third party
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Ollama

# Load environment variables from a local .env file, if one exists.
load_dotenv()
- # Initialize Ollama LLM
# Initialize the Ollama LLM client. Requires a local Ollama server with the
# "mistral" model already pulled; temperature=0.7 allows moderately varied output.
# NOTE(review): langchain_community.llms.Ollama is deprecated in newer LangChain
# releases in favor of langchain_ollama.OllamaLLM — confirm the installed version.
llm = Ollama(
    model="mistral",
    temperature=0.7,
)
- # Define a simple prompt template
# Prompt template with a single input variable. The template text is
# Portuguese for "Answer the following question: {question}".
prompt = PromptTemplate(
    input_variables=["question"],
    template="Responda à seguinte pergunta: {question}",
)
- # Create the LLM chain
# Compose the prompt and the LLM into a runnable chain.
# NOTE(review): LLMChain is deprecated in recent LangChain versions in favor
# of the `prompt | llm` runnable pipeline — confirm the installed version.
chain = LLMChain(llm=llm, prompt=prompt)
def get_answer(question: str) -> str:
    """Return the model's answer to *question* as plain text.

    Uses ``chain.invoke`` (the supported API) instead of the deprecated
    ``Chain.run``. ``LLMChain.invoke`` returns a dict; the generated text
    lives under LLMChain's default output key, ``"text"``.

    Args:
        question: The question to send to the model.

    Returns:
        The model's generated answer string.
    """
    result = chain.invoke({"question": question})
    return result["text"]
if __name__ == "__main__":
    # Smoke test: ask (in Portuguese) "What is the capital of France?"
    print(get_answer("Qual a capital da França?"))