first draft

parent e69318d649
commit 39827ef1ff

sw_lab9-10.ipynb · 433 lines · new file

@@ -0,0 +1,433 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop\n",
"# Task 9-10\n",
"## - VGG16 + ResNet\n",
"## - AlexNet, VGG16, ResNet with lantvillage-dataset\n",
"## - data generation using Unity - Jacek Kaluzny\n",
"## - data augmentation - edge filters, rotation, textures"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## VGG16 + ResNet on train_test_sw"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Data preparation"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import Image, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2fe63b50",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import subprocess\n",
"import pkg_resources\n",
"import numpy as np\n",
"\n",
"# install any missing third-party dependencies used below\n",
"required = {'scikit-image', 'opencv-python'}\n",
"installed = {pkg.key for pkg in pkg_resources.working_set}\n",
"missing = required - installed\n",
"# VGG16 requires images to be of dim = (224, 224, 3)\n",
"newSize = (224, 224)\n",
"\n",
"if missing:\n",
"    python = sys.executable\n",
"    subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)\n",
"\n",
"def load_train_data(input_dir):\n",
"    import os\n",
"    from skimage.io import imread\n",
"    import cv2 as cv\n",
"    from pathlib import Path\n",
"\n",
"    image_dir = Path(input_dir)\n",
"    categories_name = []\n",
"    for file in os.listdir(image_dir):\n",
"        d = os.path.join(image_dir, file)\n",
"        if os.path.isdir(d):\n",
"            categories_name.append(file)\n",
"\n",
"    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]\n",
"\n",
"    train_img = []\n",
"    categories_count = []\n",
"    labels = []\n",
"    for i, direc in enumerate(folders):\n",
"        count = 0\n",
"        for obj in direc.iterdir():\n",
"            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':\n",
"                labels.append(os.path.basename(os.path.normpath(direc)))\n",
"                count += 1\n",
"                img = imread(obj)  # returns an ndarray of shape xSize x ySize x colorDepth\n",
"                img = img[:, :, :3]  # keep only the RGB channels\n",
"                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray\n",
"                img = img / 255  # normalization\n",
"                train_img.append(img)\n",
"        categories_count.append(count)\n",
"    X = {}\n",
"    X[\"values\"] = np.array(train_img)\n",
"    X[\"categories_name\"] = categories_name\n",
"    X[\"categories_count\"] = categories_count\n",
"    X[\"labels\"] = labels\n",
"    return X\n",
"\n",
"def load_test_data(input_dir):\n",
"    from skimage.io import imread\n",
"    import cv2 as cv\n",
"    from pathlib import Path\n",
"    import json\n",
"\n",
"    image_path = Path(input_dir)\n",
"\n",
"    labels_path = image_path.parents[0] / 'test_labels.json'\n",
"\n",
"    jsonString = labels_path.read_text()\n",
"    objects = json.loads(jsonString)\n",
"\n",
"    categories_name = []\n",
"    categories_count = []\n",
"    count = 0\n",
"    c = objects[0]['value']\n",
"    for e in objects:\n",
"        if e['value'] != c:\n",
"            categories_count.append(count)\n",
"            c = e['value']\n",
"            count = 1\n",
"        else:\n",
"            count += 1\n",
"        if not e['value'] in categories_name:\n",
"            categories_name.append(e['value'])\n",
"\n",
"    categories_count.append(count)\n",
"\n",
"    test_img = []\n",
"\n",
"    labels = []\n",
"    for e in objects:\n",
"        p = image_path / e['filename']\n",
"        img = imread(p)  # returns an ndarray of shape xSize x ySize x colorDepth\n",
"        img = img[:, :, :3]  # keep only the RGB channels\n",
"        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray\n",
"        img = img / 255  # normalization\n",
"        test_img.append(img)\n",
"        labels.append(e['value'])\n",
"\n",
"    X = {}\n",
"    X[\"values\"] = np.array(test_img)\n",
"    X[\"categories_name\"] = categories_name\n",
"    X[\"categories_count\"] = categories_count\n",
"    X[\"labels\"] = labels\n",
"    return X"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def create_tf_ds(X_train, y_train_enc, X_validate, y_validate_enc, X_test, y_test_enc):\n",
"    import tensorflow as tf\n",
"\n",
"    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train_enc))\n",
"    validation_ds = tf.data.Dataset.from_tensor_slices((X_validate, y_validate_enc))\n",
"    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test_enc))\n",
"\n",
"    train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()\n",
"    test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()\n",
"    validation_ds_size = tf.data.experimental.cardinality(validation_ds).numpy()\n",
"\n",
"    print(\"Training data size:\", train_ds_size)\n",
"    print(\"Test data size:\", test_ds_size)\n",
"    print(\"Validation data size:\", validation_ds_size)\n",
"\n",
"    # each split shuffles with its own size as buffer (the draft reused train_ds_size for all three)\n",
"    train_ds = (train_ds\n",
"                .shuffle(buffer_size=train_ds_size)\n",
"                .batch(batch_size=32, drop_remainder=True))\n",
"    test_ds = (test_ds\n",
"               .shuffle(buffer_size=test_ds_size)\n",
"               .batch(batch_size=32, drop_remainder=True))\n",
"    validation_ds = (validation_ds\n",
"                     .shuffle(buffer_size=validation_ds_size)\n",
"                     .batch(batch_size=32, drop_remainder=True))\n",
"\n",
"    return train_ds, test_ds, validation_ds"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def get_run_logdir(root_logdir):\n",
"    import os\n",
"    import time\n",
"\n",
"    run_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n",
"    return os.path.join(root_logdir, run_id)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def diagram_setup(model_name):\n",
"    from tensorflow import keras\n",
"    import os\n",
"\n",
"    root_logdir = os.path.join(os.curdir, \"logs\", \"fit\", model_name)\n",
"\n",
"    run_logdir = get_run_logdir(root_logdir)\n",
"    tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\n",
"    # return the callback so it can actually be passed to model.fit\n",
"    return tensorboard_cb"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "cc941c5a",
"metadata": {},
"outputs": [],
"source": [
"# Data load\n",
"data_train = load_train_data(\"./train_test_sw/train_sw\")\n",
"values_train = data_train['values']\n",
"labels_train = data_train['labels']\n",
"\n",
"data_test = load_test_data(\"./train_test_sw/test_sw\")\n",
"X_test = data_test['values']\n",
"y_test = data_test['labels']"
]
},
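{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check on the loaded arrays clarifies what the loaders return. This is a minimal sketch, assuming the load above succeeded; all names come from the cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# values are stacked, normalized 224x224x3 images; labels are per-image class names\n",
"print(values_train.shape)  # expected: (n_train_images, 224, 224, 3)\n",
"print(data_train['categories_name'], data_train['categories_count'])\n",
"print(X_test.shape, len(y_test))"
]
},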
{
"cell_type": "code",
"execution_count": null,
"id": "25040ac9",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.model_selection import train_test_split\n",
"X_train, X_validate, y_train, y_validate = train_test_split(values_train, labels_train, test_size=0.2, random_state=42)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1fe47e6",
"metadata": {},
"outputs": [],
"source": [
"from sklearn.preprocessing import LabelEncoder\n",
"class_le = LabelEncoder()\n",
"y_train_enc = class_le.fit_transform(y_train)\n",
"# use transform (not fit_transform) so validation and test reuse the mapping fitted on the training labels\n",
"y_validate_enc = class_le.transform(y_validate)\n",
"y_test_enc = class_le.transform(y_test)"
]
},
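{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"To verify the integer coding, `LabelEncoder` exposes the fitted classes in the order of their codes. A minimal sketch using the encoder fitted above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# class_le.classes_[i] is the label string encoded as integer i\n",
"for code, name in enumerate(class_le.classes_):\n",
"    print(code, name)"
]
},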
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_ds_vgg_sw, test_ds_vgg_sw, validation_ds_vgg_sw = create_tf_ds(X_train, y_train_enc, X_validate, y_validate_enc, X_test, y_test_enc)\n",
"tensorboard_cb = diagram_setup('vgg_sw')"
]
},
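{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Pulling one batch from the training pipeline confirms the shapes the network will see. A minimal sketch over the dataset created above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# with batch_size=32 and newSize=(224, 224): images (32, 224, 224, 3), labels (32,)\n",
"for images, batch_labels in train_ds_vgg_sw.take(1):\n",
"    print(images.shape, batch_labels.shape)"
]
},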
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### VGG16"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import keras\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Conv2D, MaxPool2D, Flatten\n",
"\n",
"# VGG16: 13 conv layers in five blocks, each block followed by 2x2 max pooling,\n",
"# then two 4096-unit dense layers and a softmax classification head\n",
"model_VGG = Sequential()\n",
"model_VGG.add(Conv2D(input_shape=(224,224,3), filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=64, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\n",
"model_VGG.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=128, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\n",
"model_VGG.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=256, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(Conv2D(filters=512, kernel_size=(3,3), padding=\"same\", activation=\"relu\"))\n",
"model_VGG.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\n",
"model_VGG.add(Flatten())\n",
"model_VGG.add(Dense(units=4096, activation=\"relu\"))\n",
"model_VGG.add(Dense(units=4096, activation=\"relu\"))\n",
"model_VGG.add(Dense(units=2, activation=\"softmax\"))"
]
},
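{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Training all 16 layers from scratch is slow on a small dataset. A common alternative, sketched below and not part of the original draft, is transfer learning: load ImageNet weights through `keras.applications`, freeze the convolutional backbone, and train only a new classification head. `model_tl` is a hypothetical name used only here."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"\n",
"# hypothetical transfer-learning variant of the model above\n",
"base = tf.keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))\n",
"base.trainable = False  # freeze the pretrained convolutional backbone\n",
"model_tl = tf.keras.Sequential([\n",
"    base,\n",
"    tf.keras.layers.Flatten(),\n",
"    tf.keras.layers.Dense(256, activation='relu'),\n",
"    tf.keras.layers.Dense(2, activation='softmax')\n",
"])"
]
},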
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from keras.optimizers import Adam\n",
"opt = Adam(learning_rate=0.001)  # 'lr' is deprecated in favour of 'learning_rate'\n",
"# labels are integer-encoded (LabelEncoder), so use the sparse variant of the loss\n",
"model_VGG.compile(optimizer=opt, loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_VGG.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from keras.callbacks import ModelCheckpoint, EarlyStopping\n",
"# TF2 logs the metric as 'accuracy'/'val_accuracy' (not 'acc'/'val_acc')\n",
"checkpoint = ModelCheckpoint(\"vgg16_1.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')\n",
"early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\n",
"# fit_generator is deprecated; model.fit consumes the tf.data pipelines directly,\n",
"# and the finite datasets define the epoch length themselves\n",
"hist = model_VGG.fit(train_ds_vgg_sw, validation_data=validation_ds_vgg_sw, epochs=5, callbacks=[checkpoint, early, tensorboard_cb])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"plt.plot(hist.history[\"accuracy\"])\n",
"plt.plot(hist.history['val_accuracy'])\n",
"plt.plot(hist.history['loss'])\n",
"plt.plot(hist.history['val_loss'])\n",
"plt.title(\"Model accuracy and loss\")\n",
"plt.ylabel(\"Value\")\n",
"plt.xlabel(\"Epoch\")\n",
"plt.legend([\"Accuracy\", \"Validation Accuracy\", \"Loss\", \"Validation Loss\"])\n",
"plt.show()"
]
},
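{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"The test split built by `create_tf_ds` is never scored in this draft; a one-line evaluation closes the loop. A minimal sketch, assuming the training above completed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# evaluate on the held-out test pipeline\n",
"test_loss, test_acc = model_VGG.evaluate(test_ds_vgg_sw)\n",
"print(f\"test accuracy: {test_acc:.3f}\")"
]
},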
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.7"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "1b132c2ed43285dcf39f6d01712959169a14a721cf314fe69015adab49bb1fd1"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}