Skip to content

Commit

Permalink
accept changes
Browse files Browse the repository at this point in the history
  • Loading branch information
freddysongg committed Nov 25, 2024
1 parent 029de86 commit 60ddedb
Show file tree
Hide file tree
Showing 10 changed files with 120 additions and 51 deletions.
144 changes: 93 additions & 51 deletions app.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
import requests
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import streamlit.components.v1 as components

# API Base URL
# API Base URL
API_BASE_URL = "http://127.0.0.1:8000"

# UI Setup
st.set_page_config(
page_title="Café ML Demo",
layout="centered",
page_title="Café ML Dashboard",
layout="wide",
initial_sidebar_state="expanded",
)

# Style Settings
# Custom Styles
st.markdown("""
<style>
body {
Expand All @@ -24,56 +24,88 @@
.stButton>button {
background-color: #4e342e;
color: white;
border-radius: 12px;
font-size: 16px;
padding: 8px 20px;
}
.stButton>button:hover {
background-color: #6d4c41;
background-color: #3e2723;
color: white;
}
.sidebar .sidebar-content {
text-align: center;
}
.reportview-container .main .block-container {
max-width: 90%;
padding-top: 2rem;
}
.header {
font-size: 32px;
font-weight: bold;
margin-bottom: 1rem;
}
.subheader {
font-size: 18px;
margin-bottom: 1.5rem;
}
</style>
""", unsafe_allow_html=True)

st.title("☕ Café ML Demo")
st.sidebar.title("⚙️ Settings")
# Header
st.markdown("<div class='header'>☕ Café ML Dashboard</div>", unsafe_allow_html=True)
st.markdown("<div class='subheader'>Analyze and Predict Revenue & Product Performance</div>", unsafe_allow_html=True)

# Layout Setup
# Grid Layout
col1, col2 = st.columns([1, 2]) # Sidebar occupies 1/3 and main content 2/3

# Sidebar options
model_type = st.sidebar.selectbox("Select Model Type", ["LSTM", "Transformer", "ARIMA"])
seq_length = st.sidebar.number_input("Sequence Length", min_value=5, max_value=50, value=10, step=1)
uploaded_file = st.sidebar.file_uploader("Upload Test Data (CSV)", type=["csv"])
dark_mode = st.sidebar.checkbox("Enable Dark Mode")
# Sidebar for User Input
with col1:
st.sidebar.title("⚙️ Settings")

# Dark Mode Styling
if dark_mode:
st.markdown("""
<style>
body {
background-color: #2c2c2c;
color: #f4f1ea;
}
.stButton>button {
background-color: #6d4c41;
color: white;
}
</style>
""", unsafe_allow_html=True)
# Text Input for Data Entry
st.sidebar.markdown("### Enter Sales Data")
text_input = st.sidebar.text_area(
"Paste your sales data or product details:",
placeholder="Enter data in plain text..."
)

# Dropdown for Model Selection
model_type = st.sidebar.selectbox("Select Prediction Model", ["LSTM", "Transformer", "ARIMA"])

# Option to Upload CSV
uploaded_file = st.sidebar.file_uploader("Or Upload Test Data (CSV)", type=["csv"])

# Load Data
data = None
if uploaded_file:
data = pd.read_csv(uploaded_file)
st.sidebar.write("Data preview:")
st.sidebar.write(data.head())
else:
st.sidebar.warning("Upload a CSV file to proceed.")
# Process Text Input Using Gemini API (or equivalent)
if st.sidebar.button("Prepare Input Data"):
if text_input.strip():
# Simulate Gemini API for preprocessing
try:
response = requests.post(f"{API_BASE_URL}/process", json={"text": text_input})
response.raise_for_status()
prepared_data = response.json()["prepared_data"]
st.sidebar.success("Data processed successfully!")
st.sidebar.write(prepared_data) # Display prepared data
except requests.exceptions.RequestException as e:
st.sidebar.error(f"Error processing data: {e}")
else:
st.sidebar.warning("Please enter text data or upload a file.")

# Run Inference
if st.button("Run Inference"):
if data is None:
st.error("Please upload a test data file first.")
else:
# Convert data to JSON-friendly format
input_data = {"data": data.values.flatten().tolist()}
# Main Content Area
with col2:
# Prediction Results Section
st.markdown("### 🎯 Predictions")
if st.button("Run Prediction"):
if uploaded_file:
data = pd.read_csv(uploaded_file)
input_data = {"data": data.values.flatten().tolist()}
elif "prepared_data" in locals():
input_data = {"data": prepared_data}
else:
st.error("No data available for prediction. Please upload or process data first.")
st.stop()

# Call API based on model type
# Call Prediction API
endpoint = {
"LSTM": "/predict/lstm",
"Transformer": "/predict/transformer",
Expand All @@ -85,23 +117,33 @@
response = requests.post(API_BASE_URL + endpoint, json=input_data)
response.raise_for_status()
predictions = response.json()["predictions"]
st.success("Prediction successful!")

# Show Predictions
st.write("### Prediction Results")
st.write(pd.DataFrame(predictions, columns=["Predicted Values"]))

# Visualization
st.success("Inference complete! Here are the results:")
# Reserve Space for Visualization
st.write("### Visualizations")
fig = go.Figure()
fig.add_trace(go.Scatter(y=data.values.flatten(), name="Actual", mode="lines"))
fig.add_trace(go.Scatter(y=predictions, name="Predicted", mode="lines"))
fig.add_trace(go.Scatter(y=input_data["data"], name="Actual Data", mode="lines"))
fig.add_trace(go.Scatter(y=predictions, name="Predictions", mode="lines"))
fig.update_layout(
title="Actual vs Predicted",
title="Actual vs Predicted Data",
xaxis_title="Time Steps",
yaxis_title="Values",
template="plotly_dark" if dark_mode else "plotly_white",
template="plotly_white"
)
st.plotly_chart(fig)
except requests.exceptions.RequestException as e:
st.error(f"API call failed: {e}")
st.error(f"Prediction failed: {e}")
else:
st.error("Invalid model type selected.")

# Placeholder for Future Graphs
st.markdown("### 📊 Future Graph Visualizations")
st.info("Graph visualization space reserved for future updates.")

# Footer
st.markdown("#### Made with ❤️ for CaféCast")
st.markdown("<hr>", unsafe_allow_html=True)
st.markdown("#### Made with ❤️ for CaféCast by [Your Team Name]")
Binary file added models/best_lstm_bayesian_model.keras
Binary file not shown.
6 changes: 6 additions & 0 deletions models/best_lstm_bayesian_model_metrics.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"mae": 136.36266355514528,
"rmse": 181.1664698793561,
"training_loss": 0.004868278745561838,
"val_loss": 0.006319853011518717
}
Binary file added models/best_lstm_model.keras
Binary file not shown.
6 changes: 6 additions & 0 deletions models/best_lstm_model_metrics.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"mae": 119.10698909078326,
"rmse": 143.48743870036978,
"training_loss": 0.0067557827569544315,
"val_loss": 0.009065927006304264
}
Binary file added models/best_ts_transformer_model.keras
Binary file not shown.
Binary file added models/best_ts_transformer_model.pt
Binary file not shown.
Binary file added models/scaler.pkl
Binary file not shown.
Binary file added models/scaler_lstm_bayesian.pkl
Binary file not shown.
15 changes: 15 additions & 0 deletions models/time_series_transformer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import torch.nn as nn

class TimeSeriesTransformer(nn.Module):
    """Transformer-encoder regressor for time-series inputs.

    Stacks ``num_layers`` standard encoder layers and projects each
    position's ``d_model``-dimensional encoding down to a single scalar,
    so the output has the same leading shape as the input with a final
    dimension of 1.

    NOTE(review): ``input_size`` is accepted but not used by any layer
    below — inputs must already carry ``d_model`` features. The encoder
    layers use PyTorch's default ``batch_first=False`` layout, i.e. the
    conventional shape is (seq_len, batch, d_model) — confirm against
    the training code that feeds this model.
    """

    def __init__(self, input_size, num_layers, num_heads, d_model, dim_feedforward):
        super().__init__()
        # Build one encoder layer and let TransformerEncoder clone it
        # num_layers times (standard PyTorch pattern).
        layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=num_heads,
            dim_feedforward=dim_feedforward,
        )
        self.encoder_layer = layer
        self.transformer_encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        # Per-position regression head: d_model -> 1.
        self.fc = nn.Linear(d_model, 1)

    def forward(self, x):
        """Encode ``x`` and map every position to one output value."""
        encoded = self.transformer_encoder(x)
        return self.fc(encoded)

0 comments on commit 60ddedb

Please sign in to comment.