Compare commits


7 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Kacper E. Dudzic | 4ff281954f | Update README.md | 2023-06-29 13:26:39 +02:00 |
| Kacper | 04eac5ac2f | add elements from the presentation version | 2023-06-29 13:16:17 +02:00 |
| s444417 | 8a1677d02a | fix requirements | 2023-06-29 12:45:32 +02:00 |
| s444417 | cadd564387 | fix instruction bug | 2023-06-28 23:28:38 +02:00 |
| s444417 | 6fa7ff4820 | add instruction | 2023-06-28 23:00:54 +02:00 |
| s444417 | 0c8e63d488 | move files to root | 2023-06-28 22:55:54 +02:00 |
| | 6a8f83f2b7 | unnecessary tab in graph generation deleted | 2023-06-18 09:30:16 +02:00 |
534 changed files with 106 additions and 82 deletions

.env_template

@@ -1,7 +1,7 @@
-RECIPE_PATH=AMUseBotFront/ai_talks/AMUseBotBackend/recipe/
-DIALOG_PATH=AMUseBotFront/ai_talks/AMUseBotBackend/dialog/
-INTENT_DICT_PATH=ai_talks/AMUseBotBackend/utils/intent_dict.json
-MODEL_IDENTIFIER_PATH=ai_talks/AMUseBotBackend/models/NLU/roberta-base-cookdial.txt
-INGREDIENTS_RECIPES_MERGED=
-CHARACTERS_DICT=
+RECIPE_PATH=recipe/
+DIALOG_PATH=dialog/
+INTENT_DICT_PATH=intent_dict.json
+MODEL_IDENTIFIER_PATH=roberta-base-cookdial-v1_1.txt
+INGREDIENTS_RECIPES_MERGED=ingredients_recipes_merged.csv
+CHARACTERS_DICT=characters_dict.json
 API_KEY=
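This hunk makes every path in .env_template relative to the repository root instead of the old AMUseBotFront/ai_talks/AMUseBotBackend layout. A minimal sketch of how these values are read once the app is started from the repo root, using the same python-dotenv call the updated modules use (the printout is purely illustrative):

```python
# Illustrative only: reading the values defined above with python-dotenv,
# exactly as the updated modules do (load_dotenv('.env_template')).
import os
from dotenv import load_dotenv

load_dotenv('.env_template')

RECIPE_PATH = os.getenv('RECIPE_PATH')            # "recipe/"
DIALOG_PATH = os.getenv('DIALOG_PATH')            # "dialog/"
INTENT_DICT_PATH = os.getenv('INTENT_DICT_PATH')  # "intent_dict.json"

# The paths are now resolved against the current working directory,
# so the app has to be launched from the repository root (see the README below).
print(os.path.abspath(RECIPE_PATH))
```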

README.md (new file, 36 lines)

@@ -0,0 +1,36 @@
# Cooking taskbot project

## Run system

#### With Conda
conda create -n "my_env" python=3.9.12 ipython
conda activate my_env
pip install -r requirements.txt
streamlit run ai_talks\chat.py

After running the system, the model is saved to:
- Linux: ~/.cache/huggingface/transformers
- Windows: C:\Users\username\.cache\huggingface\transformers

To use the purely experimental generative features, an OpenAI API key is needed for now. Insert it into the following file:
AMUseBot/.env_template

## Requirements
Python 3.9.12

## Dataset
[YiweiJiang2015/CookDial](https://github.com/YiweiJiang2015/CookDial)

## NLU model HF repo
[kedudzic/roberta-base-cookdial](https://huggingface.co/AMUseBot/roberta-base-cookdial-v1_1)
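The README above only says where to put the OpenAI key; this compare does not show how the key is consumed. A minimal sketch, assuming the backend reads the API_KEY entry from .env_template and passes it to the openai client (the exact wiring is an assumption, not part of this diff):

```python
# Assumption: the generative features read API_KEY from .env_template and hand
# it to the openai client; this wiring is not shown anywhere in this compare.
import os
import openai
from dotenv import load_dotenv

load_dotenv('.env_template')
openai.api_key = os.getenv('API_KEY')
```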


@@ -52,7 +52,7 @@ class NLG:
     def llm_substitute_product(character, user_message):
-        input = st.session_state.characters_dict['task_substitute'] + f'"{user_message}".'
+        input = st.session_state.characters_dict['task_substitute'] + f'"{user_message}".' + st.session_state.characters_dict['characters'][character]['task_specification']
         try:
             return NLG.llm_create_response(character, input)
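The only change in the NLG hunk above is that the active character's task_specification is now appended to the substitution prompt. A small sketch of the resulting prompt string, using the helpful_chef entries from characters_dict.json shown further down; the user message and the standalone dict literal are illustrative:

```python
# Illustrative prompt assembly mirroring the updated `input = ...` line above,
# with the helpful_chef strings from characters_dict.json and a made-up user message.
characters_dict = {
    "task_substitute": ("A user has just asked for a substitute for a missing ingredient, "
                        "answer him according to your character in one short sentence "
                        "with at most 3 alternatives: "),
    "characters": {
        "helpful_chef": {
            "task_specification": (" Give your answer as a natural sounding, full English sentence. "
                                   "Keep the sentence length similar and do not make the language flowery.")
        }
    },
}

character = "helpful_chef"
user_message = "I ran out of buttermilk, what can I use instead?"  # hypothetical input

prompt = (characters_dict['task_substitute']
          + f'"{user_message}".'
          + characters_dict['characters'][character]['task_specification'])
print(prompt)
```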


@@ -7,7 +7,7 @@ from rank_bm25 import BM25Okapi
 import os
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv('.env_template')
 INGREDIENTS_RECIPES_MERGED = os.getenv('INGREDIENTS_RECIPES_MERGED')
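The hunk above only repoints load_dotenv and keeps the BM25Okapi import; the retrieval code itself is outside this diff. A generic sketch of how the merged ingredients/recipes CSV could be indexed and queried with rank_bm25 (reading the file with pandas, the column choice, and the whitespace tokenization are all assumptions):

```python
# Generic rank_bm25 sketch; the real indexing/query code is not part of this diff.
import os
import pandas as pd
from dotenv import load_dotenv
from rank_bm25 import BM25Okapi

load_dotenv('.env_template')
df = pd.read_csv(os.getenv('INGREDIENTS_RECIPES_MERGED'))  # ingredients_recipes_merged.csv

corpus = df.iloc[:, 0].astype(str).tolist()         # assumed: first column holds the recipe text
tokenized_corpus = [doc.lower().split() for doc in corpus]
bm25 = BM25Okapi(tokenized_corpus)

query = "chicken soup with noodles".lower().split()
print(bm25.get_top_n(query, corpus, n=3))           # three best-matching entries
```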


@@ -12,7 +12,9 @@ from src.utils.lang import en
 import openai
 import copy
 import json
+import string
+import streamlit.components.v1 as components
+import re
 import os
 from dotenv import load_dotenv
@@ -28,11 +30,6 @@ if __name__ == '__main__':
     favicon: Path = icons_dir / "favicons/0.png"
     # --- GENERAL SETTINGS ---
     LANG_PL: str = "Pl"
-    AI_MODEL_OPTIONS: list[str] = [
-        "gpt-3.5-turbo",
-        "gpt-4",
-        "gpt-4-32k",
-    ]
     CONFIG = {"page_title": "AMUsebot", "page_icon": Image.open(favicon)}
@@ -42,7 +39,7 @@ if __name__ == '__main__':
     with open(css_file) as f:
         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-    load_dotenv()
+    load_dotenv('.env_template')
     DIALOG_PATH = os.getenv('DIALOG_PATH')
     RECIPE_PATH = os.getenv('RECIPE_PATH')
@@ -60,8 +57,6 @@ if __name__ == '__main__':
         st.session_state.messages = []
     if "user_text" not in st.session_state:
         st.session_state.user_text = ""
-    if "input_kind" not in st.session_state:
-        st.session_state.input_kind = st.session_state.locale.input_kind_1
     if "seed" not in st.session_state:
         st.session_state.seed = randrange(10 ** 3)  # noqa: S311
     if "costs" not in st.session_state:
@@ -79,39 +74,48 @@
     with open(CHARACTERS_DICT) as f:
         st.session_state.characters_dict = json.load(f)
 
-def show_graph():
+def mermaid(code: str) -> None:
+    components.html(
+        f"""
+        <pre class="mermaid">
+            %%{{init: {{'themeVariables': {{ 'edgeLabelBackground': 'transparent'}}}}}}%%
+            flowchart TD;
+            {code}
+            linkStyle default fill:white,color:white,stroke-width:2px,background-color:lime;
+        </pre>
+        <script type="module">
+            import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
+            mermaid.initialize({{ startOnLoad: true }});
+        </script>
+        """, height=1000
+    )
+
+def graph():
     # Create a graphlib graph object
     if st.session_state.generated:
-        user, chatbot = [], []
-        graph = graphviz.Digraph()
-        chatbot = copy.deepcopy(st.session_state.generated)
-        user = copy.deepcopy(st.session_state.past)
-        for x in range(len(user)):
-            chatbot_text = [word + '\n' if i % 5 == 0 and i > 0 else word for i, word in enumerate(chatbot[x].split(' '))]
-            user_text = [word + '\n' if i % 5 == 0 and i > 0 else word for i, word in enumerate(user[x].split(' '))]
-            graph.edge(' '.join(chatbot_text), ' '.join(user_text))
-            try:
-                graph.edge(' '.join(user_text), ' '.join([word + '\n' if i % 5 == 0 and i > 0 else word for i, word in enumerate(chatbot[x + 1].split(' '))]))
-            except:
-                pass
-        st.graphviz_chart(graph)
+        system = [utterance for utterance in st.session_state.generated][-3:]
+        user = [utterance for utterance in st.session_state.past][-2:]
+        graph = ""
+        for i, utterance in enumerate(system):
+            utterance = utterance.strip('\n')
+            utterance = " ".join([word + '<br>' if i % 5 == 0 and i > 0 else word for i, word in enumerate(utterance.split(" "))])
+            utterance = utterance.replace('\"', '')
+            if i < len(user):
+                user[i] = user[i].strip('\n')
+                user[i] = user[i].replace('\"', '')
+                user[i] = " ".join([word + '<br>' if i % 5 == 0 and i > 0 else word for i, word in enumerate(user[i].split(' '))])
+                graph += f"{string.ascii_uppercase[i]}(\"{utterance}\") --> |{user[i]}| {string.ascii_uppercase[i+1]};"
+            else:
+                graph += f"{string.ascii_uppercase[i]}(\"{utterance}\") --> {string.ascii_uppercase[i+1]}(...);style {string.ascii_uppercase[i+1]} fill:none,color:white;"
+        graph = graph.replace('\n', ' ')#replace(')','').replace('(','')
+        #print(graph)
+        return graph
 
 def main() -> None:
     c1, c2 = st.columns(2)
     with c1, c2:
-        st.session_state.input_kind = c2.radio(
-            label=st.session_state.locale.input_kind,
-            options=(st.session_state.locale.input_kind_1, st.session_state.locale.input_kind_2),
-            horizontal=True,
-        )
-        role_kind = c1.radio(
-            label=st.session_state.locale.radio_placeholder,
-            options=(st.session_state.locale.radio_text1, st.session_state.locale.radio_text2),
-            horizontal=True,
-        )
-        if role_kind == st.session_state.locale.radio_text1:
-            character_type = c2.selectbox(label=st.session_state.locale.select_placeholder2, key="role",
+        character_type = c1.selectbox(label=st.session_state.locale.select_placeholder2, key="role",
                                       options=st.session_state.locale.ai_role_options)
         st.session_state.dp.character = character_type
         if character_type == 'default':
@@ -119,15 +123,13 @@ def main() -> None:
         else:
             st.session_state.dp.llm_rephrasing = True
-        elif role_kind == st.session_state.locale.radio_text2:
-            c2.text_input(label=st.session_state.locale.select_placeholder3, key="role")
     get_user_input()
     show_chat_buttons()
     show_conversation()
     with st.sidebar:
-        show_graph()
+        mermaid(graph())
+        #show_graph()
 
 if __name__ == "__main__":
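With this change the sidebar no longer draws a graphviz chart: graph() now builds a Mermaid flowchart definition (nodes A, B, C, ... for system utterances, user replies as edge labels) and mermaid() embeds it via components.html. A sketch of the kind of string graph() returns for one user turn; the utterances are invented:

```python
# Hypothetical return value of graph() for two system utterances and one user reply.
# Node letters come from string.ascii_uppercase, the user reply becomes the edge label,
# and '<br>' is appended after roughly every five words of a long utterance.
example = (
    'A("What shall we cook today?") --> |I want to make pancakes| B;'
    'B("First, whisk two eggs in a<br> large bowl") --> C(...);'
    'style C fill:none,color:white;'
)
# mermaid(example) would wrap this in a `flowchart TD` block and render it
# in the Streamlit sidebar via components.html.
```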


@@ -12,7 +12,7 @@ from AMUseBotBackend.src.NLU.nlu import NLU
 import os
 from dotenv import load_dotenv
-load_dotenv()
+load_dotenv('.env_template')
 INTENT_DICT_PATH = os.getenv('INTENT_DICT_PATH')
 MODEL_IDENTIFIER_PATH = os.getenv('MODEL_IDENTIFIER_PATH')


@@ -20,15 +20,8 @@ class Locale:
     chat_clear_btn: str
     chat_save_btn: str
     speak_btn: str
-    input_kind: str
-    input_kind_1: str
-    input_kind_2: str
     select_placeholder1: str
     select_placeholder2: str
-    select_placeholder3: str
-    radio_placeholder: str
-    radio_text1: str
-    radio_text2: str
     stt_placeholder: str
     footer_title: str
     footer_option0: str
@@ -55,15 +48,8 @@ en = Locale(
     chat_clear_btn="Clear",
     chat_save_btn="Save",
     speak_btn="Push to Speak",
-    input_kind="Input Kind",
-    input_kind_1="Text",
-    input_kind_2="Voice [test mode]",
     select_placeholder1="Select Model",
     select_placeholder2="Select Role",
-    select_placeholder3="Create Role",
-    radio_placeholder="Role Interaction",
-    radio_text1="Select",
-    radio_text2="Create",
     stt_placeholder="To Hear The Voice Of AI Press Play",
     footer_title="Support & Feedback",
     footer_option0="Chat",


@@ -1,6 +1,6 @@
 {
-    "task_paraphrase": "You're currently reading a step of a recipe, paraphrese it so that it matches your charater: ",
-    "task_substitute": "A user has just asked for a substitute for a missing ingredient, answer him according to your character: ",
+    "task_paraphrase": "You're currently reading a step of a recipe, paraphrase it so that it matches your character: ",
+    "task_substitute": "A user has just asked for a substitute for a missing ingredient, answer him according to your character in one short sentence with at most 3 alternatives: ",
     "model": "gpt-3.5-turbo-0613",
     "characters": {
         "default": {
@@ -10,7 +10,7 @@
     },
     "helpful_chef": {
         "prompt": "You're a master chef known for treating everyone like your equal. ",
-        "task_specification": " Give your answer as a natural sounding, full English sentence."
+        "task_specification": " Give your answer as a natural sounding, full English sentence. Keep the sentence length similar and do not make the language flowery."
     },
     "ramsay": {

Some files were not shown because too many files have changed in this diff.