"""Run a Bedrock-backed LangChain agent for a user query, traced via Langfuse."""
import sys

from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langfuse.langchain import CallbackHandler

from .agent_bedrock import create_agent
from .config import REGION
from .dynamo import get_contexto, langfuse
from .tools import ReportTools
def main(user_query, history, model, base):
    """Run the Bedrock agent once for *user_query* and report token usage.

    Args:
        user_query: Natural-language question forwarded to the agent.
        history: Prior conversation turns. NOTE(review): currently unused —
            presumably meant to seed ``messages``; confirm intended behavior.
        model: Model identifier handed to ``create_agent``.
        base: Label attached to the Langfuse trace as a tag.

    Returns:
        dict with the agent's final ``response`` text plus ``input_tokens``,
        ``output_tokens`` and ``total_tokens`` summed over all AI messages
        produced during the run.
    """
    # NOTE(review): no tools are registered and the system prompt is empty
    # (original literal was `""""""`, i.e. ""); these look like placeholders —
    # confirm against the intended agent configuration (ReportTools is
    # imported but never instantiated).
    report_tools = []
    SYSTEM_PROMPT = ""

    langfuse_handler = CallbackHandler()
    agent = create_agent(model, REGION, tools=report_tools)

    initial_state = {
        "messages": [
            SystemMessage(content=SYSTEM_PROMPT),
            HumanMessage(content=user_query),
        ],
        "current_step": "init",
    }

    config = {"callbacks": [langfuse_handler], "tags": [base]}
    final_state = agent.invoke(initial_state, config=config)

    # Accumulate token usage from every AI message that carries usage
    # metadata. Single getattr lookup replaces the hasattr-then-access idiom.
    total_input_tokens = 0
    total_output_tokens = 0
    for msg in final_state["messages"]:
        usage = getattr(msg, "usage_metadata", None)
        if isinstance(msg, AIMessage) and usage:
            total_input_tokens += usage.get("input_tokens", 0)
            total_output_tokens += usage.get("output_tokens", 0)

    # Push any buffered trace events to Langfuse before returning.
    langfuse.flush()

    return {
        "response": final_state["messages"][-1].content,
        "input_tokens": total_input_tokens,
        "output_tokens": total_output_tokens,
        "total_tokens": total_input_tokens + total_output_tokens,
    }
|
|
|
|
|
|
if __name__ == "__main__":
    # Bug fix: the original called main() with no arguments, but main()
    # requires user_query, history, model and base — running the script
    # always raised TypeError. Take the values from the command line
    # instead; a fresh run starts with an empty history.
    if len(sys.argv) != 4:
        raise SystemExit(f"usage: {sys.argv[0]} <user_query> <model> <base>")
    result = main(sys.argv[1], [], sys.argv[2], sys.argv[3])
    print(result["response"])
|