# Bedrock knowledge-base chatbot: history-aware retrieval + stuffed-document
# answer chain, exposed through chat_with_bot().
# Standard library
import os

# Third-party
import boto3  # noqa: F401 - not used in this file; kept in case other code relies on it
from dotenv import load_dotenv
import langchain.chains  # noqa: F401 - unused here; kept to avoid breaking other modules
from langchain.chains import (
    ConversationalRetrievalChain,  # noqa: F401 - unused here
    create_history_aware_retriever,
    create_retrieval_chain,
)
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_aws import ChatBedrock
from langchain_aws.retrievers import AmazonKnowledgeBasesRetriever
from langchain_core.messages import AIMessage, HumanMessage  # noqa: F401 - unused here
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Populate os.environ from a local .env file (AWS credentials, etc.) before
# any client below reads those variables.
load_dotenv()
# Bedrock-hosted Anthropic model used both to rewrite search queries and to
# generate the final answer. Credentials come from the environment, which
# load_dotenv() populated above.
_bedrock_credentials = {
    "aws_access_key_id": os.environ["AWS_ACCESS_KEY_ID"],
    "aws_secret_access_key": os.environ["AWS_SECRET_ACCESS_KEY"],
    "aws_session_token": os.environ["AWS_SESSION_TOKEN"],
}

llm = ChatBedrock(
    model_id="arn:aws:bedrock:us-east-1:654654422992:application-inference-profile/d9blf0g3fzqz",
    region_name="us-east-1",
    provider="anthropic",
    model_kwargs={"temperature": 0.2, "max_tokens": 1000},
    **_bedrock_credentials,
)
# Prompt that condenses the running conversation into a standalone search
# query for the knowledge-base retriever.
_search_query_messages = [
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
    ("assistant", "-"),
    ("user", "Given the above conversation, generate a search query to look up to get information relevant to the conversation"),
]
prompt_search_query = ChatPromptTemplate.from_messages(_search_query_messages)
# Prompt that answers the user's latest question from the retrieved context,
# keeping the conversation history in view.
_answer_messages = [
    ("system", "Answer the user's questions based on the below context:\n\n{context}"),
    MessagesPlaceholder(variable_name="chat_history"),
    ("user", "{input}"),
]
prompt_get_answer = ChatPromptTemplate.from_messages(_answer_messages)
# Retriever backed by an Amazon Bedrock Knowledge Base (not Qdrant): each
# search query fetches the top 4 vector-search results from the KB.
retriever = AmazonKnowledgeBasesRetriever(
    knowledge_base_id="RBD9TI5HYU",
    region_name="us-east-1",
    retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 4}},
)
# Assemble the pipeline:
# 1. Rewrite (chat_history + input) into a standalone query and run the
#    retriever on it.
retriever_chain = create_history_aware_retriever(llm, retriever, prompt_search_query)

# 2. Stuff the retrieved documents into the answer prompt and call the LLM.
document_chain = create_stuff_documents_chain(llm, prompt_get_answer)

# 3. Combine retrieval and answering into a single runnable.
retrieval_chain = create_retrieval_chain(retriever_chain, document_chain)
def chat_with_bot(user_input, chat_history):
    """Run one conversation turn through the retrieval chain.

    Args:
        user_input (str): The user's latest message.
        chat_history (list): Prior conversation turns (user and assistant
            messages).

    Returns:
        str: The assistant's answer.
    """
    # Hand the history plus the new message to the retrieve-then-answer
    # chain and surface only the final answer text.
    payload = {"chat_history": chat_history, "input": user_input}
    result = retrieval_chain.invoke(payload)
    return result["answer"]
if __name__ == "__main__":
    # Manual smoke test; guarded so importing this module does not trigger
    # a live Bedrock call.
    print(
        chat_with_bot(
            "Quanto é o auxilio?",
            chat_history=[{"role": "user", "content": "Hello"}],
        )
    )