diff --git a/application/frontend/src/pages/chatbot/chatbot.tsx b/application/frontend/src/pages/chatbot/chatbot.tsx
index 8664de593..290b023ff 100644
--- a/application/frontend/src/pages/chatbot/chatbot.tsx
+++ b/application/frontend/src/pages/chatbot/chatbot.tsx
@@ -204,7 +204,7 @@ export const Chatbot = () => {
'yaml',
'zephir',
];
- type chatMessage = { timestamp: string; role: string; message: string; data: Document[] | null };
+ type chatMessage = { timestamp: string; role: string; message: string; data: Document[] | null; };
interface ChatState {
term: string;
error: string;
@@ -287,7 +287,7 @@ export const Chatbot = () => {
timestamp: new Date().toLocaleTimeString(),
role: 'assistant',
message: data.response,
- data: data.table,
+ data: data.table
},
]);
})
@@ -299,11 +299,23 @@ export const Chatbot = () => {
}
function displayDocument(d: Document) {
- return (
- <p>
- *Reference: The above answer was based on the {d.name} section of{' '}
- {d.section ? d.section : d.sectionID};
- </p>
+
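+ // Link to the matched document's OpenCRE page: /node/<doctype>/<name>, then /section/<section> if a section name exists, otherwise /sectionid/<sectionID>.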
+ let link = '/node/' + d.doctype.toLowerCase() + '/' + d.name;
+ if (d.section) {
+ link = link + '/section/' + d.section;
+ } else {
+ link = link + '/sectionid/' + d.sectionID;
+ }
+ return (
+ <>
+ <p>
+ *Reference: The above answer was based on:
+ {d.name} section:{' '}
+ {d.section ? d.section : d.sectionID};
+ </p>
+ <p>
+ You can find more information about {d.name} on its <a href={link}>OpenCRE page</a>.
+ </p>
+ </>
 );
}
@@ -337,8 +349,8 @@ export const Chatbot = () => {
{processResponse(m.message)}
{m.data
? m.data?.map((m2) => {
- return displayDocument(m2);
- })
+ return displayDocument(m2);
+ })
: ''}
@@ -372,6 +384,14 @@ export const Chatbot = () => {
+
+ <p>
+ ChatCRE uses Google's PaLM 2 LLM; you can find the code for OpenCRE at https://github.com/owasp/OpenCRE.
+ Your question travels to Heroku (OpenCRE's hosting provider) and then to GCP over a protected connection.
+ Your data is never stored on the OpenCRE servers; you can start a new session by refreshing the page.
+ The OpenCRE team has taken all reasonable precautions we could think of to protect your privacy and security.
+ </p>
+
diff --git a/application/prompt_client/prompt_client.py b/application/prompt_client/prompt_client.py
index 95918bccc..280556b1d 100644
--- a/application/prompt_client/prompt_client.py
+++ b/application/prompt_client/prompt_client.py
@@ -415,9 +415,9 @@ def generate_text(self, prompt: str) -> Dict[str, str]:
timestamp = datetime.now().strftime("%I:%M:%S %p")
if not prompt:
return {"response": "", "table": "", "timestamp": timestamp}
- logger.info(f"getting embeddings for {prompt}")
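+ # Debug level keeps raw user prompts out of standard info logs.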
+ logger.debug(f"getting embeddings for {prompt}")
question_embedding = self.ai_client.get_text_embeddings(prompt)
- logger.info(f"retrieved embeddings for {prompt}")
+ logger.debug(f"retrieved embeddings for {prompt}")
# Find the closest area in the existing embeddings
closest_id, similarity = self.get_id_of_most_similar_node_paginated(
@@ -426,8 +426,7 @@ def generate_text(self, prompt: str) -> Dict[str, str]:
)
closest_object = self.database.get_node_by_db_id(closest_id)
answer = ""
-
- logger.info(
+ logger.debug(
f"The prompt {prompt}, was most similar to object \n{closest_object}\n, with similarity:{similarity}"
)
closest_content = ""
@@ -449,7 +448,7 @@ def generate_text(self, prompt: str) -> Dict[str, str]:
else:
return {"response": "An adequate answer could not be found", "table": [""]}
- logger.info(f"retrieved completion for {prompt}")
+ logger.debug(f"retrieved completion for {prompt}")
table = [closest_object]
result = f"Answer: {answer}"
return {"response": result, "table": table}