Skip to content

Commit

Permalink
Update QELMChatUI.py
Browse files Browse the repository at this point in the history
  • Loading branch information
Inserian authored Jan 27, 2025
1 parent 6c5282f commit 09690ea
Showing 1 changed file with 22 additions and 50 deletions.
72 changes: 22 additions & 50 deletions QELMChatUI.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,30 +2,14 @@
# -*- coding: utf-8 -*-

"""
QELM Conversational UI - Enhanced Basic Version
==========================================
This script provides a chat-style interface to interact with the Quantum-Enhanced Language Model (QELM).
Enhancements include:
1. Support for both .json and .qelm model files.
2. Error handling with output.
3. Modern GUI using ttk.
4. Additional features like clearing chat, saving conversations, and status updates.
5. Added autoloading (if no token file is selected it asks).
Dependencies:
- tkinter (standard with Python)
- numpy
- nltk
Ensure all dependencies are installed before running the script.
Note that this is a basic chat interface for QELM models. It will be updated periodically, but it is not a primary focus of development.
Author: Brenton Carter
==========================================
QELM Conversational UI - Revised Layout
============================================================================
This script provides a chat-style interface to interact with the Quantum-Enhanced
Language Model (QELM), with an enhanced layout similar to modern chat interfaces
(e.g., ChatGPT). The duplication issue in user and QELM messages is resolved by
avoiding multiple appends to the conversation history.
Author: Brenton Carter (modified to fix duplication)
"""

import tkinter as tk
Expand All @@ -36,32 +20,30 @@
import nltk
import os
import datetime
import traceback # For detailed error traces
import traceback

# Initialize NLTK data (only the first time)
nltk.download('punkt', quiet=True)


def normalize_vector(vec: np.ndarray) -> np.ndarray:
    """
    Normalize a vector to unit length.

    Parameters
    ----------
    vec : np.ndarray
        Input vector.

    Returns
    -------
    np.ndarray
        The unit-length vector, or the input unchanged when its norm is
        below 1e-12 (guards against division by near-zero).
    """
    norm = np.linalg.norm(vec)
    return vec / norm if norm > 1e-12 else vec


def softmax(x: np.ndarray) -> np.ndarray:
    """
    Compute softmax values for each set of scores in x.

    Parameters
    ----------
    x : np.ndarray
        Input score vector.

    Returns
    -------
    np.ndarray
        Probabilities summing to 1. The maximum is subtracted before
        exponentiating for numerical stability with large scores.
    """
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum()


def save_conversation(conversation: list, file_path: str):
"""
Save the conversation history to a text file.
"""

try:
with open(file_path, 'w', encoding='utf-8') as f:
for line in conversation:
Expand All @@ -72,10 +54,7 @@ def save_conversation(conversation: list, file_path: str):


class QuantumLanguageModel:
"""
Quantum-Enhanced Language Model combining embeddings and output weights.
Supports loading from both .json and .qelm files along with separate token mapping files.
"""

def __init__(self):
self.vocab_size = None
self.embed_dim = None
Expand Down Expand Up @@ -262,19 +241,11 @@ def run_inference(self, input_text: str, max_length: int = 10):


class QELMChatUI:
"""
Chat-style User Interface for interacting with the QELM,
with an advanced layout resembling modern chat interfaces.
Key Fix for Duplication:
------------------------
Removed extra lines in `handle_send` that appended the user/QELM messages
again after `update_chat` already appended them.
"""
# Below are modifiable values for the UI.
def __init__(self, root):
self.root = root
self.root.title("QELM Chat - Advanced Layout")
self.root.geometry("1200x700")
self.root.title("QELM Chat - Gpt layout")
self.root.geometry("1100x600")
self.root.resizable(False, False)

# Initialize model
Expand Down Expand Up @@ -505,6 +476,8 @@ def prompt_token_map_loading(self):
messagebox.showerror("Token Mapping Load Error", error_message)
self.status_label.config(text="Failed to load token mapping.")

# Tokenization values (may need to be altered for embedding issues)

def display_available_tokens(self):
if not self.model.token_to_id:
self.update_chat("System", "No token mappings available.", color=self.system_color)
Expand All @@ -531,7 +504,6 @@ def handle_send(self, event=None):
self.status_label.config(text="Error during inference.")
response = "<Error: Response generation failed>"


self.refresh_chat_display()
self.user_input.delete(0, tk.END)

Expand Down

0 comments on commit 09690ea

Please sign in to comment.