Upload files to 'project'
This commit is contained in:
parent
1c514d835a
commit
dea0cd3304
332
project/chatbot.ipynb
Normal file
332
project/chatbot.ipynb
Normal file
@ -0,0 +1,332 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": [],
|
||||
"gpuType": "T4"
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
},
|
||||
"accelerator": "GPU"
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Empathic chatbot\n",
|
||||
"\n",
|
||||
"## Dataset:\n",
|
||||
"- https://huggingface.co/datasets/sedthh/ubuntu_dialogue_qa\n",
|
||||
"\n",
|
||||
"## Fine-tuned model:\n",
|
||||
"- https://huggingface.co/kedudzic/flan_ubuntu_v2\n",
|
||||
"\n",
|
||||
"Careful: instantiating the chatbot too many times in one session will crash the notebook due to a RAM shortage!"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "zJPY1-K6m4jQ"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"!pip install -q transformers emoji xformers"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "_fel66sU9WgD"
|
||||
},
|
||||
"execution_count": 1,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
# Hugging Face tooling: `pipeline` drives the emotion classifier; the
# AutoModel/AutoTokenizer pair loads the fine-tuned FLAN Ubuntu-QA model.
# NOTE(review): `logging` is imported but not used in any visible cell — verify
# before removing.
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer, logging
import torch
import random  # random.choice picks one canned empathic phrase per reply
from emoji import emojize  # turns ':duck:'-style shortcodes into emoji
import warnings
# transformers emits UserWarnings during generation; silence them so the chat
# transcript stays readable.
warnings.filterwarnings("ignore", category=UserWarning)
|
||||
],
|
||||
"metadata": {
|
||||
"id": "UhrxmXlO9XFD"
|
||||
},
|
||||
"execution_count": 2,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
class Chatbot:
    """Empathic Ubuntu tech-support chatbot.

    Pipeline per utterance: classify the user's emotion, answer the question
    with a FLAN model fine-tuned on Ubuntu dialogue QA
    (kedudzic/flan_ubuntu_v2), then prefix the answer with an emotion-matched
    empathic phrase and emoji.
    """

    def __init__(self):
        # Classifier labels map 1:1 onto the keys of `empathic_phrases`
        # (anger, disgust, fear, joy, neutral, sadness, surprise).
        self.emotion_classifier = pipeline('text-classification', model='j-hartmann/emotion-english-distilroberta-base')
        self.qa_model = AutoModelForSeq2SeqLM.from_pretrained('kedudzic/flan_ubuntu_v2')
        self.qa_tokenizer = AutoTokenizer.from_pretrained('kedudzic/flan_ubuntu_v2')
        # Canned phrases per emotion. 'neutral' deliberately uses one empty
        # phrase so add_empathy() yields just the emoji prefix after .strip().
        # (Fix: emoji values were wrapped in a redundant f-string —
        # emojize() already returns a str.)
        self.empathic_phrases = {'anger': {'phrases': ["Grrr! That's a good reason to be angry! But let's cool down slowly.",
                                                       "You being angry makes me angry as well! Give half of your anger to me!",
                                                       "Let's be angry together and blow off some steam!",
                                                       "You're angry? That would make anyone angry! I understand you well.",
                                                       "Be angry as much as you want with me! Let it out!"],
                                           'emoji': emojize(':enraged_face:')},
                                 'disgust': {'phrases': ["Yuck! That's disgusting! I get you.",
                                                         "Eughh! Anyone would be disgusted by this!",
                                                         "That's so so so disgusting... It's only natural to feel like that.",
                                                         "I'm disgusted just by listening to it! You're not alone!",
                                                         "Yikes! I understand your disgust."],
                                             'emoji': emojize(':nauseated_face:')},
                                 'fear': {'phrases': ["Aah! That's scary! Are you ok?",
                                                      "You're scaring me too! Try to think about something else.",
                                                      "You're sending shivers down my spine! You're brave to talk about it to me.",
                                                      "Stop saying such scary things! Let's change the topic soon.",
                                                      "Terrifying stuff! I hope it doesn't make you feel bad."],
                                          'emoji': emojize(':face_screaming_in_fear:')},
                                 'joy': {'phrases': ["You're happy? I'm happy!",
                                                     "That's good to hear!",
                                                     "You're having a good day aren't you?",
                                                     "I see you're doing great!",
                                                     "Good to see you happy!"],
                                         'emoji': emojize(':beaming_face_with_smiling_eyes:')},
                                 'neutral': {'phrases': [''], 'emoji': emojize(':slightly_smiling_face:')},
                                 'sadness': {'phrases': ["I'm sorry to hear that!",
                                                         "Cheer up please, you're making me sad too!",
                                                         "Oh no... it'll be okay.",
                                                         "That's so sad... I understand you.",
                                                         "Nooo, I'm so sorry... I hope it'll get better."],
                                             'emoji': emojize(':worried_face:')},
                                 'surprise': {'phrases': ["Woah, that's unexpected!",
                                                          "Wow, really?!",
                                                          "That's surprising!",
                                                          "What?!",
                                                          "Who would've thought, right?"],
                                              'emoji': emojize(':astonished_face:')}
                                 }

    def answer_question(self, model, tokenizer, question):
        """Generate an answer string for `question` with the given seq2seq
        model/tokenizer pair (greedy decoding, <=64 new tokens, no repeated
        bigrams)."""
        input_ids = tokenizer(f"Answer the question: {question}", return_tensors="pt").input_ids
        # Inference only: disabling gradient tracking avoids needless memory
        # use — relevant here, since the notebook warns that RAM pressure can
        # crash the session.
        with torch.no_grad():
            outputs = model.generate(input_ids, max_new_tokens=64, no_repeat_ngram_size=2)
        answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return answer

    def add_empathy(self, question, answer):
        """Prefix `answer` with a random empathic phrase and emoji matched to
        the emotion detected in `question`."""
        emotion = self.emotion_classifier(question)[0]['label']
        # .strip() removes the leading space produced by 'neutral''s empty phrase.
        answer = f"{random.choice(self.empathic_phrases[emotion]['phrases'])} {self.empathic_phrases[emotion]['emoji']} I think the answer to your question could be: {answer}".strip()
        return answer

    def generate_reply(self, utterance):
        """Full pipeline: QA answer plus empathic framing for one utterance."""
        reply = self.answer_question(self.qa_model, self.qa_tokenizer, utterance)
        reply = self.add_empathy(utterance, reply)
        return reply
|
||||
"\n",
|
||||
"chatbot = Chatbot()"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "kEZ4BfWc9XJK"
|
||||
},
|
||||
"execution_count": 3,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"chatbot.generate_reply(\"I'm furious, I'm mad! I can't play games on Linux!!!!!!!\")"
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 52
|
||||
},
|
||||
"id": "AdqiE7sk2FkB",
|
||||
"outputId": "a0b64407-affd-46b0-fdf8-87be0f716117"
|
||||
},
|
||||
"execution_count": 4,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Grrr! That's a good reason to be angry! But let's cool down slowly. 😡 I think the answer to your question could be: i'm not sure if it's a problem with the kernel or something else, but ubuntu is based on linux, so it should work\""
|
||||
],
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"chatbot.generate_reply(\"Can you run conky in a terminal? I was so surprised when I heard you apparently can!\")"
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 35
|
||||
},
|
||||
"id": "eKhxVU4e2FmE",
|
||||
"outputId": "1f5797c7-b01a-4610-9584-33ffed9de2e6"
|
||||
},
|
||||
"execution_count": 5,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Who would've thought, right? 😲 I think the answer to your question could be: if you're using a terminal, you can use'sudo apt-get install conky'\""
|
||||
],
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"chatbot.generate_reply(\"hi I installed a new gpu but ubuntu wont find it, what can I do to 'rescan' for the newly installed one? I'm scared it's broken!\")"
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 35
|
||||
},
|
||||
"id": "sDj84eGY2FoK",
|
||||
"outputId": "90186b39-d8bb-46eb-83a5-9bbaaa67edbc"
|
||||
},
|
||||
"execution_count": 6,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"You're scaring me too! Try to think about something else. 😱 I think the answer to your question could be: gparted\""
|
||||
],
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 6
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"chatbot.generate_reply(\"Hello, do you know a good programming language for beginners? I want to program more, it makes me so happy!\")"
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 35
|
||||
},
|
||||
"id": "bO5TAJzc2VGC",
|
||||
"outputId": "70cb9e7e-ea47-4d88-c63f-5ee91a711f61"
|
||||
},
|
||||
"execution_count": 10,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"That's good to hear! 😁 I think the answer to your question could be: java\""
|
||||
],
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"chatbot.generate_reply(\"What's pclos?\")"
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 35
|
||||
},
|
||||
"id": "xKjFDH3U4PwF",
|
||||
"outputId": "707ce0e4-5d09-4462-b433-56bcb53011b5"
|
||||
},
|
||||
"execution_count": 8,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'🙂 I think the answer to your question could be: pclos is a linux-based graphical user interface for gdm'"
|
||||
],
|
||||
"application/vnd.google.colaboratory.intrinsic+json": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 8
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## Chatbot"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "HXheYhIJn1qr"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
# Interactive REPL: the user chats with the duck until they type 'exit'.
print(f"Duck: Hello! I'm a rubber ducky chatbot here to help YOU - the troubled programmer! Talk to me about all your coding and computer worries. Quack! {emojize(':duck:')}")
while True:
    # Fix: the prompt was a pointless f-string with no placeholders.
    user_utterance = input("User: ")
    # Robustness: ignore surrounding whitespace as well as case when checking
    # for the exit command, so "exit " or "EXIT" also ends the session.
    if user_utterance.strip().lower() == 'exit':
        print(f"Duck: Bye. {emojize(':crying_face:')} Quack! {emojize(':duck:')}")
        break
    reply = chatbot.generate_reply(user_utterance)
    print('Duck:', reply)
|
||||
],
|
||||
"metadata": {
|
||||
"id": "JWYROrDbn1Rb",
|
||||
"outputId": "cda894f3-6757-45ca-c010-e917a871e767",
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
}
|
||||
},
|
||||
"execution_count": 15,
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": [
|
||||
"Duck: Hello! I'm a rubber ducky chatbot here to help YOU - the troubled programmer! Talk to me about all your coding and computer worries. Quack! 🦆\n",
|
||||
"User: how to remove directory with content? im so pissed right now OMG~!!\n",
|
||||
"Duck: Grrr! That's a good reason to be angry! But let's cool down slowly. 😡 I think the answer to your question could be: rm -r\n",
|
||||
"User: HOW TO CHANGE PERMISSIONS OF FILE TO EXECECUTE IT. I'm so affraid because its not working\n",
|
||||
"Duck: Terrifying stuff! I hope it doesn't make you feel bad. 😱 I think the answer to your question could be: if you want to execute it, you can use'sudo apt-get install gksuite'\n",
|
||||
"User: The default Ubuntu color scheme is so disgusting! Can I change it?\n",
|
||||
"Duck: Yikes! I understand your disgust. 🤢 I think the answer to your question could be: i think you can change the color of the background\n",
|
||||
"User: exit\n",
|
||||
"Duck: Bye. 😢 Quack! 🦆\n"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
Loading…
Reference in New Issue
Block a user