Dockerfile and Pulumi stack for infra

This commit is contained in:
2025-09-05 18:02:43 -03:00
parent 180a639bdb
commit 32f4607c4b
7 changed files with 72 additions and 42 deletions

View File

@@ -1,3 +1,4 @@
import json
from langchain_core.tools import tool
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain_core.prompts import ChatPromptTemplate,MessagesPlaceholder, PromptTemplate
@@ -16,7 +17,15 @@ from langchain_core.agents import AgentAction, AgentFinish
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.tools import Tool
import os
llm = ChatBedrock(
def find_tool_by_name(tools: list[Tool], tool_name: str):
    """Return the tool in *tools* whose ``name`` matches *tool_name*.

    Args:
        tools: Candidate tools to search (only their ``name`` attribute is read).
        tool_name: Exact name to look up.

    Returns:
        The first tool whose ``name`` equals *tool_name*.

    Raises:
        ValueError: If no tool with that name exists.
    """
    for tool in tools:
        if tool.name == tool_name:
            # Debug prints removed: they wrote the tool name plus blank lines
            # to stdout on every successful lookup.
            return tool
    raise ValueError(f"Tool with name {tool_name} not found")
# NOTE(review): this span is a rendered git diff, not runnable source — hunk
# headers ("@@ ... @@") and BOTH the removed and added versions of several
# lines appear below without +/- markers (e.g. the two retriever lines, the
# old module-level find_tool_by_name, the two agent pipelines). Portions of
# the real file between hunks are not visible here. Code left byte-identical;
# only review comments added.
def agent_call(event,context):
# Lambda-style entry point: builds a Bedrock LLM + knowledge-base retriever,
# runs one ReAct step, executes the chosen tool, then re-invokes the agent
# with the observation appended. TODO confirm against the full file.
llm = ChatBedrock(
# Anthropic model served via a Bedrock application inference profile;
# credentials come from the Lambda environment.
model_id="arn:aws:bedrock:us-east-1:654654422992:application-inference-profile/d9blf0g3fzqz",
region_name="us-east-1",
aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
@@ -25,18 +34,12 @@ llm = ChatBedrock(
model_kwargs={"temperature": 0.2, 'max_tokens': 1000,},
provider='anthropic'
)
# Diff artifact: old (module-level) and new (function-local) retriever lines
# both shown; retrieves top-4 chunks from the Bedrock knowledge base.
retriever = AmazonKnowledgeBasesRetriever(
retriever = AmazonKnowledgeBasesRetriever(
knowledge_base_id="RBD9TI5HYU",
region_name="us-east-1",
retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 4}},
)
# Diff artifact: removed module-level copy of find_tool_by_name and the old
# __main__ guard follow.
def find_tool_by_name(tools: list[Tool],tool_name:str):
for tool in tools:
if tool.name==tool_name:
return tool
raise ValueError(f"Tool with name {tool_name} not found")
if __name__=="__main__":
print("Hello React Langhain")
tools=[retriever.as_tool()]
# Portuguese ReAct prompt for an assistant serving IFSP campus students;
# tool list and names are rendered into the template below.
template="""Você é um assistente para alunos de diversos campus diferentes do instituto federal de são paulo, sua função é responder perguntas
@@ -62,13 +65,12 @@ Question: {input}
Chat history:{chat_history}
Thought: {agent_scratchpad}
"""
prompt=PromptTemplate.from_template(template=template).partial(tools=render_text_description(tools), tool_names=','.join([t.name for t in tools]))
#llm=ChatOpenAI(model="gpt-4o-mini",temperature=0,stop_sequences=["\nObservation:"])
intermediate_steps=[]
# Diff artifact: old pipeline (no output parser) and new pipeline (with
# ReActJsonSingleInputOutputParser) both shown, plus old/new first invocations.
agent= {"input": lambda x:x["input"],"agent_scratchpad": lambda x:format_log_to_str(x["agent_scratchpad"]),"chat_history":lambda x:x["chat_history"]}|prompt | llm
agent_step: Union[AgentAction,AgentFinish]=agent.invoke({"input": "Quanto é o valor do auxilio moradia?","agent_scratchpad":intermediate_steps,"chat_history":{"role":"user","content":""}})
agent= {"input": lambda x:x["input"],"agent_scratchpad": lambda x:format_log_to_str(x["agent_scratchpad"]),"chat_history":lambda x:x["chat_history"]}|prompt | llm|ReActJsonSingleInputOutputParser()
agent_step: Union[AgentAction,AgentFinish]=agent.invoke({"input": "Que dia é hoje?","agent_scratchpad":intermediate_steps,"chat_history":{"role":"user","content":"sou do campus sao paulo"}})
#print(agent_step)
if isinstance(agent_step,AgentAction):
# Agent requested a tool: run it and feed the observation back for a
# second invocation. Lines between hunks are not visible here.
tool_name=agent_step.tool
@@ -77,5 +79,10 @@ Thought: {agent_scratchpad}
observation=tool_to_use.func(str(tool_input))
print(f"{observation=}")
intermediate_steps.append((agent_step,str(observation)))
agent_step: Union[AgentAction,AgentFinish]=agent.invoke({"input": "Quanto é o valor do auxiliomoradia?","agent_scratchpad":intermediate_steps,"chat_history":{"role":"user","content":""}})
print(agent_step)
agent_step: Union[AgentAction,AgentFinish]=agent.invoke({"input": "Quanto é o valor do auxilio moradia?","agent_scratchpad":intermediate_steps,"chat_history":{"role":"user","content":"sou do campus sao paulo"}})
return agent_step
def hello(event, context):
    """Minimal Lambda-style health-check handler.

    Ignores *event* and *context* and returns a static 200 response whose
    body is the JSON-encoded string "hello_world".
    """
    payload = json.dumps("hello_world")
    return {"statusCode": 200, "body": payload}