Adds final version with new prompt
This commit is contained in:
@@ -2,6 +2,7 @@ import streamlit as st
|
||||
import time
|
||||
from backend import BDAgent
|
||||
import boto3
|
||||
from boto3.dynamodb.conditions import Key
|
||||
|
||||
# Configure the page - MUST BE FIRST
|
||||
st.set_page_config(
|
||||
@@ -11,9 +12,19 @@ st.set_page_config(
|
||||
)
|
||||
|
||||
# AWS setup: credentials/region come from the environment or ~/.aws config.
# NOTE(review): `session` appears unused in the visible code — the resource
# below is built on boto3's *default* session, not this one. Confirm it is
# used elsewhere in the file or drop it.
session = boto3.Session()
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
|
||||
|
||||
def list_bases():
    """Return the ids of the knowledge bases that have a stored context.

    Queries the ``poc_dnx_monthly_summary`` table through the
    ``item_type_index`` GSI for items whose ``item_type`` is ``"contexto"``
    and strips the ``"_contexto"`` suffix from each item id.

    Follows ``LastEvaluatedKey`` so bases beyond DynamoDB's 1 MB query-page
    limit are not silently dropped (the original read only the first page).
    """
    table = dynamodb.Table("poc_dnx_monthly_summary")
    query_kwargs = {
        "IndexName": "item_type_index",
        "KeyConditionExpression": Key("item_type").eq("contexto"),
    }
    bases = []
    while True:
        response = table.query(**query_kwargs)
        bases.extend(
            item["id"].removesuffix("_contexto")
            for item in response.get("Items", [])
        )
        last_key = response.get("LastEvaluatedKey")
        if last_key is None:
            # No more pages.
            break
        query_kwargs["ExclusiveStartKey"] = last_key
    return bases
|
||||
|
||||
# Model selection with session state persistence
|
||||
MODELS = [
|
||||
"anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||
"anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||
"meta.llama4-maverick-17b-instruct-v1:0",
|
||||
"meta.llama4-scout-17b-instruct-v1:0",
|
||||
@@ -27,7 +38,13 @@ selected_value = st.selectbox(
|
||||
MODELS,
|
||||
key="selected_model"
|
||||
)
|
||||
|
||||
# Knowledge bases available in DynamoDB, presented alphabetically.
BASES = sorted(list_bases())

base = st.selectbox(
    "Selecione a base:",
    BASES,
    key="selected_base",
)
|
||||
|
||||
|
||||
# Initialize chat history in session state
|
||||
@@ -57,17 +74,19 @@ if prompt := st.chat_input("Type your message here..."):
|
||||
|
||||
# Ask the backend agent for the assistant's reply.
# NOTE: a single call — the stale three-argument variant (without `base`),
# whose result was immediately overwritten, has been removed; it triggered a
# redundant, expensive backend invocation on every message.
result = BDAgent.main(prompt, str(st.session_state.messages), selected_value, base)
full_response = result["response"]

# Simulate a typing effect by revealing the reply one character at a time.
displayed_response = ""
for char in full_response:
    displayed_response += char
    message_placeholder.markdown(displayed_response + "▌")
    time.sleep(0.01)

# Replace the cursor-suffixed partial text with the final reply.
message_placeholder.markdown(full_response)

# Surface token usage reported by the backend.
st.caption(f"Tokens: {result['input_tokens']} in / {result['output_tokens']} out / {result['total_tokens']} total")

# Add assistant response to chat history
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
||||
|
||||
|
||||
Reference in New Issue
Block a user