Feat: Adds base project

2026-02-25 10:55:19 -03:00
parent 31f87ab437
commit 624f5dc7e6
46 changed files with 2355 additions and 0 deletions

32
back/app/api.py Normal file

@@ -0,0 +1,32 @@
from fastapi import FastAPI
from pydantic import BaseModel
from .backend import orquestrador
app = FastAPI()
@app.get("/")
def health():
return {"status": "ok"}
class QueryRequest(BaseModel):
    query: str
    history: str = ""
    # model and base are required by orquestrador.main(); these defaults are assumptions,
    # with the model id taken from the ids supported in agent_bedrock.py.
    model: str = "anthropic.claude-haiku-4-5-20251001-v1:0"
    base: str = ""
class QueryResponse(BaseModel):
response: str
input_tokens: int
output_tokens: int
total_tokens: int
@app.post("/agent", response_model=QueryResponse)
def run_agent(request: QueryRequest):
    result = orquestrador.main(request.query, request.history, request.model, request.base)
return QueryResponse(
response=result["response"],
input_tokens=result["input_tokens"],
output_tokens=result["output_tokens"],
total_tokens=result["total_tokens"],
)

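For a quick sanity check of this endpoint, it can be exercised with a plain HTTP client. A minimal sketch, assuming the requests package is available, the app is served locally on port 8000 (for example via uvicorn), and an illustrative query:

import requests

payload = {"query": "Summarize the latest report.", "history": ""}
resp = requests.post("http://localhost:8000/agent", json=payload, timeout=120)
resp.raise_for_status()
data = resp.json()
print(data["response"])
print(data["input_tokens"], data["output_tokens"], data["total_tokens"])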
120
back/app/backend/agent_bedrock.py Normal file

@@ -0,0 +1,120 @@
import operator
from typing import TypedDict, Annotated
from langchain_aws import ChatBedrockConverse
from langchain_core.messages import AIMessage, ToolMessage
from langgraph.graph import StateGraph, END
from .config import REGION, AWS_ACCOUNT
class AgentState(TypedDict):
messages: Annotated[list, operator.add]
current_step: str
def create_bedrock_llm(model_id: str, region: str = REGION, tools: list = None):
    """
    Create a ChatBedrockConverse instance from a bare Bedrock model ID.
    Args:
        model_id: Bedrock model ID (e.g., anthropic.claude-haiku-4-5-20251001-v1:0)
        region: AWS region (default: REGION env var)
        tools: List of LangChain tools to bind to the model
    Returns:
        ChatBedrockConverse instance with the given tools bound
    """
    # NOTE: these inference-profile ARNs are currently unused; the model is resolved below
    # by prepending a region prefix ("us." / "global.") to the bare model id instead.
    MODEL_ARNS = {
"anthropic.claude-haiku-4-5-20251001-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/us.anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/global.anthropic.claude-sonnet-4-5-20250929-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/us.meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/us.meta.llama4-scout-17b-instruct-v1:0",
"amazon.nova-lite-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/us.amazon.nova-lite-v1:0",
"amazon.nova-pro-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/us.amazon.nova-pro-v1:0",
"amazon.nova-2-lite-v1:0": f"arn:aws:bedrock:{REGION}:{AWS_ACCOUNT}:inference-profile/global.amazon.nova-2-lite-v1:0",
}
PROVIDER = {
"anthropic.claude-haiku-4-5-20251001-v1:0": "anthropic",
"anthropic.claude-sonnet-4-5-20250929-v1:0": "anthropic",
"meta.llama4-maverick-17b-instruct-v1:0": "meta",
"meta.llama4-scout-17b-instruct-v1:0": "meta",
"amazon.nova-lite-v1:0": "amazon",
"amazon.nova-pro-v1:0": "amazon",
"amazon.nova-2-lite-v1:0": "amazon",
}
prefix = {
"anthropic.claude-haiku-4-5-20251001-v1:0": "us",
"anthropic.claude-sonnet-4-5-20250929-v1:0": "global",
"meta.llama4-maverick-17b-instruct-v1:0": "us",
"meta.llama4-scout-17b-instruct-v1:0": "us",
"amazon.nova-lite-v1:0": "us",
"amazon.nova-pro-v1:0": "us",
"amazon.nova-2-lite-v1:0": "global",
}
llm = ChatBedrockConverse(
model_id=prefix[model_id] + "." + model_id,
region_name=region,
provider=PROVIDER[model_id],
max_tokens=2048,
temperature=0.7,
)
return llm.bind_tools(tools or [])
def call_model(state: AgentState, llm) -> AgentState:
    """Call the LLM (with any bound tools) and append its response to the messages."""
    response = llm.invoke(state["messages"])
    # Return the update instead of mutating state in place; LangGraph merges this dict
    # into the graph state ("messages" accumulates via operator.add).
    return {"messages": [response], "current_step": "model_called"}
def call_tools(state: AgentState, tools_map: dict) -> AgentState:
    """Execute any tool calls from the last LLM response and return their results."""
    last_message = state["messages"][-1]
    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
        tool_messages = []
        for tool_call in last_message.tool_calls:
            result = tools_map[tool_call["name"]].invoke(tool_call["args"])
            tool_messages.append(ToolMessage(content=str(result), tool_call_id=tool_call["id"]))
        return {"messages": tool_messages, "current_step": "tools_executed"}
    return {"messages": [], "current_step": "no_tools"}
def should_continue(state: AgentState) -> str:
"""Determine if we should continue to tools or end."""
last_message = state["messages"][-1]
if hasattr(last_message, "tool_calls") and last_message.tool_calls:
return "tools"
return "end"
def create_agent(model_id: str, region: str = REGION, tools: list = None):
    """
    Create a LangGraph agent backed by a Bedrock model with optional tools.
    Args:
        model_id: Bare Bedrock model ID (one of the ids mapped in create_bedrock_llm);
            it is resolved to a cross-region inference profile id internally
        region: AWS region
        tools: List of LangChain tools to bind to the model
    Returns:
        Compiled LangGraph workflow
    """
    tools = tools or []
    llm = create_bedrock_llm(model_id, region, tools)
tools_map = {t.name: t for t in tools}
workflow = StateGraph(AgentState)
workflow.add_node("model", lambda state: call_model(state, llm))
workflow.add_node("tools", lambda state: call_tools(state, tools_map))
workflow.set_entry_point("model")
workflow.add_conditional_edges("model", should_continue, {"tools": "tools", "end": END})
workflow.add_edge("tools", "model")
return workflow.compile()

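As a usage sketch, the compiled graph can be driven directly, independent of the API layer. This assumes the environment variables from config.py are set, AWS credentials are available, and that the import path works when running from back/; the model id is one from the mapping above and the prompt text is illustrative.

from app.backend.agent_bedrock import create_agent
from langchain_core.messages import HumanMessage, SystemMessage

# With no tools bound, the graph simply goes model -> end.
agent = create_agent("amazon.nova-lite-v1:0", tools=[])
state = {
    "messages": [
        SystemMessage(content="You are a concise assistant."),
        HumanMessage(content="Say hello in one sentence."),
    ],
    "current_step": "init",
}
result = agent.invoke(state)
print(result["messages"][-1].content)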
6
back/app/backend/config.py Normal file

@@ -0,0 +1,6 @@
import os
TABLE = os.environ["TABLE"]
REGION = os.environ["REGION"]
AWS_ACCOUNT = os.environ["AWS_ACCOUNT"]
SECRET_NAME = os.environ["SECRET_NAME"]

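config.py raises a KeyError at import time if any of these variables is unset. For local experimentation they can be stubbed before the package is imported; every value below is illustrative, and LANGFUSE_HOST is read directly in dynamo.py rather than here.

import os

# Illustrative values only; real deployments should set these in the environment.
os.environ.setdefault("TABLE", "reports-table")
os.environ.setdefault("REGION", "us-east-1")
os.environ.setdefault("AWS_ACCOUNT", "123456789012")
os.environ.setdefault("SECRET_NAME", "langfuse/credentials")
os.environ.setdefault("LANGFUSE_HOST", "https://cloud.langfuse.com")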
53
back/app/backend/dynamo.py Normal file

@@ -0,0 +1,53 @@
import boto3
import json
import os
from botocore.exceptions import ClientError
from langfuse import Langfuse
from .config import REGION, TABLE, SECRET_NAME
dynamodb = boto3.resource("dynamodb", region_name=REGION)
def get_secret() -> str:
    """Fetch the Langfuse credentials secret from AWS Secrets Manager."""
    session = boto3.session.Session()
    client = session.client(service_name="secretsmanager", region_name=REGION)
    response = client.get_secret_value(SecretId=SECRET_NAME)
    return response["SecretString"]
secrets = json.loads(get_secret())
langfuse = Langfuse(
public_key=secrets["LANGFUSE-PUBLIC-KEY"],
secret_key=secrets["LANGFUSE-SECRET-KEY"],
host=os.environ["LANGFUSE_HOST"],
)
def get_contexto(dashboard: str) -> dict:
    """
    Get contexto, filter, and items_disponiveis from DynamoDB for a given dashboard.
    Args:
        dashboard: Dashboard name; the item is stored under the id "<dashboard>_contexto"
    Returns:
        Dict with 'contexto', 'filter', and 'items_disponiveis' keys
    """
try:
table = dynamodb.Table(TABLE)
response = table.get_item(Key={"id": dashboard + "_contexto"})
if "Item" not in response:
return {"contexto": "", "filter": "", "items_disponiveis": {}}
item = response["Item"]
return {
"contexto": item.get("contexto", ""),
"filter": item.get("filter_key", ""),
"items_disponiveis": item.get("itens_disponiveis", {}),
}
except ClientError as e:
error_message = e.response["Error"]["Message"]
return {"contexto": f"Error: {error_message}", "filter": "", "items_disponiveis": {}}

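Two data shapes are implied here: the Secrets Manager secret must be a JSON object with the two Langfuse keys, and get_contexto reads a single DynamoDB item keyed by "<dashboard>_contexto". A sketch of both, with the dashboard name and all values illustrative:

# Expected shape of the Secrets Manager secret (values illustrative).
secret_example = {
    "LANGFUSE-PUBLIC-KEY": "pk-lf-xxxx",
    "LANGFUSE-SECRET-KEY": "sk-lf-xxxx",
}

# Expected shape of the item read by get_contexto("vendas"), i.e. id "vendas_contexto".
# Note the stored attribute is "itens_disponiveis" while the function returns it
# under the key "items_disponiveis".
item_example = {
    "id": "vendas_contexto",
    "contexto": "Free-text context for the dashboard.",
    "filter_key": "regiao",
    "itens_disponiveis": {"janeiro": "relatorio_2025_01"},
}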
48
back/app/backend/orquestrador.py Normal file

@@ -0,0 +1,48 @@
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langfuse.langchain import CallbackHandler
from .config import REGION
from .dynamo import langfuse, get_contexto
from .agent_bedrock import create_agent
from .tools import ReportTools
def main(user_query, history, model, base):
    """
    Run the agent for a single user query and return the response plus token usage.
    history is the stringified chat history (accepted but not yet used); model is the
    Bedrock model ID passed to create_agent; base is attached to the Langfuse trace as a tag.
    """
    report_tools = []  # placeholder: ReportTools / get_contexto are imported but not wired in yet
    SYSTEM_PROMPT = ""  # placeholder system prompt
    langfuse_handler = CallbackHandler()
    agent = create_agent(model, REGION, tools=report_tools)
initial_state = {
"messages": [
SystemMessage(content=SYSTEM_PROMPT),
HumanMessage(content=user_query),
],
"current_step": "init",
}
config = {"callbacks": [langfuse_handler], "tags": [base]}
final_state = agent.invoke(initial_state, config=config)
total_input_tokens = 0
total_output_tokens = 0
for msg in final_state["messages"]:
if isinstance(msg, AIMessage) and hasattr(msg, "usage_metadata") and msg.usage_metadata:
total_input_tokens += msg.usage_metadata.get("input_tokens", 0)
total_output_tokens += msg.usage_metadata.get("output_tokens", 0)
langfuse.flush()
return {
"response": final_state["messages"][-1].content,
"input_tokens": total_input_tokens,
"output_tokens": total_output_tokens,
"total_tokens": total_input_tokens + total_output_tokens,
}
if __name__ == "__main__":
    # Smoke test; the query, model id, and base tag below are illustrative values.
    main("What is in the latest report?", "", "anthropic.claude-haiku-4-5-20251001-v1:0", "local-test")

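get_contexto and ReportTools (defined in tools.py below) are imported but not yet used, and report_tools is left empty. One way they could plausibly be wired together, using the items_disponiveis mapping as the tool's id mapping; this is a hypothetical sketch, not part of this commit, and the import paths and dashboard name are assumptions.

from app.backend.agent_bedrock import create_agent
from app.backend.config import REGION
from app.backend.dynamo import get_contexto
from app.backend.tools import ReportTools

contexto = get_contexto("vendas")  # "vendas" is an illustrative dashboard name
report_tools = ReportTools(contexto["items_disponiveis"]).as_tools()
agent = create_agent("anthropic.claude-haiku-4-5-20251001-v1:0", REGION, tools=report_tools)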
85
back/app/backend/tools.py Normal file

@@ -0,0 +1,85 @@
from botocore.exceptions import ClientError
from langchain_core.tools import StructuredTool
from .config import TABLE
from .dynamo import dynamodb
class ReportTools:
def __init__(self, id_mapping: dict[str, str]):
self.id_mapping = id_mapping
def get_variable_value(self, id: str, variable: str) -> str:
"""
Get a specific variable's value from DynamoDB for a specific id.
Args:
id: The id of the data
variable: The variable/column name to retrieve from the table
Returns:
The content of the specified variable for the given id
"""
real_id = self.id_mapping.get(id, id)
try:
table = dynamodb.Table(TABLE)
response = table.get_item(Key={"id": real_id})
if "Item" not in response:
return f"No report found for month: {id}"
item = response["Item"]
content = item.get(variable, "")
if not content:
return f"Variable '{variable}' not found for month: {id}"
return f"<{id}>\n{content}\n</{id}>"
except ClientError as e:
error_message = e.response["Error"]["Message"]
return f"Error fetching report: {error_message}"
    def get_variables_list(self, id: str) -> str:
        """
        Get the list of variables available in the table for a specific id.
        Args:
            id: The id of the data
        Returns:
            The list of available variables/keys for the specified id
        """
real_id = self.id_mapping.get(id, id)
try:
table = dynamodb.Table(TABLE)
response = table.get_item(Key={"id": real_id})
if "Item" not in response:
return f"No data found for month: {id}"
item = response["Item"]
chaves_consolidadas = item.get("chaves_consolidadas", "")
if not chaves_consolidadas:
return f"No consolidated keys found for id: {id}"
return chaves_consolidadas
except ClientError as e:
error_message = e.response["Error"]["Message"]
return f"Error fetching consolidated keys: {error_message}"
def as_tools(self) -> list:
return [
StructuredTool.from_function(
self.get_variable_value,
name="get_variable_value",
description="Get a specific variable's data from DynamoDB for a specific id.",
),
            StructuredTool.from_function(
                self.get_variables_list,
                name="get_variables_list",
                description="Get the list of variables available in the table for a specific id.",
            ),
]

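The tools can also be exercised directly, outside any agent, which is handy for checking the DynamoDB wiring. The id mapping, ids, and variable name below are illustrative, and the import path assumes running from back/ with the config environment variables and AWS credentials in place.

from app.backend.tools import ReportTools

tools = ReportTools({"janeiro": "relatorio_2025_01"}).as_tools()
by_name = {t.name: t for t in tools}

# List the variables stored for the "janeiro" item, then fetch one of them.
print(by_name["get_variables_list"].invoke({"id": "janeiro"}))
print(by_name["get_variable_value"].invoke({"id": "janeiro", "variable": "resumo"}))  # "resumo" is illustrative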
75
back/app/front.py Normal file

@@ -0,0 +1,75 @@
import streamlit as st
import time
from backend import orquestrador
import boto3
from boto3.dynamodb.conditions import Key
# Configure the page - MUST BE FIRST
st.set_page_config(
page_title="Chatbot",
page_icon="💬",
layout="centered"
)
session = boto3.Session()
# Initialize chat history in session state
if "messages" not in st.session_state:
st.session_state.messages = []
# Display title
st.title("💬 Chatbot")
# Display chat history
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Type your message here..."):
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Display user message
with st.chat_message("user"):
st.markdown(prompt)
# Display assistant response
with st.chat_message("assistant"):
message_placeholder = st.empty()
        # The model id and base tag are assumptions: the id is one supported in agent_bedrock.py, "default" is a placeholder tag.
        selected_value = "anthropic.claude-haiku-4-5-20251001-v1:0"
        base = "default"
        result = orquestrador.main(prompt, str(st.session_state.messages), selected_value, base)
full_response = result["response"]
# Simulate typing effect
displayed_response = ""
for char in full_response:
displayed_response += char
message_placeholder.markdown(displayed_response + "")
time.sleep(0.01)
message_placeholder.markdown(full_response)
st.caption(f"Tokens: {result['input_tokens']} in / {result['output_tokens']} out / {result['total_tokens']} total")
# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response})
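# Note: the full chat history above is serialized with str() and passed to orquestrador.main()
# as the `history` argument, which main() currently accepts but does not use. The structure
# being stringified is a list of dicts, e.g. (contents illustrative):
#   [{"role": "user", "content": "Hello"},
#    {"role": "assistant", "content": "Hi! How can I help?"}]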
# Add a sidebar with options
with st.sidebar:
st.header("Options")
if st.button("Clear Chat History"):
st.session_state.messages = []
st.rerun()
st.divider()
st.markdown("""
### How to use:
1. Type your message in the input box
2. Press Enter or click Send
3. View the conversation history
""")