create Dockerfile
This commit is contained in:
parent
e6f5bc64fc
commit
59c4924080
16
Docker/Dockerfile
Normal file
16
Docker/Dockerfile
Normal file
@ -0,0 +1,16 @@
|
||||
# syntax=docker/dockerfile:1

# Base image pinned to a specific LTS release — :latest is not reproducible.
FROM ubuntu:22.04

# Install Python and pip in a single layer. Use apt-get (stable scripting CLI)
# rather than apt, skip recommended packages, and remove the package lists in
# the same layer so they never persist in the image. The original trailing
# "&& apt install -y" (no package name) was a broken fragment and is dropped.
RUN apt-get update && apt-get install -y --no-install-recommends \
      python3 \
      python3-pip \
    && rm -rf /var/lib/apt/lists/*

# All Python dependencies in one layer; --no-cache-dir keeps the image small.
# NOTE: the PyPI name "sklearn" is a deprecated stub — the real package is
# scikit-learn, which provides the sklearn module that script.py imports.
RUN pip install --no-cache-dir \
      kaggle \
      numpy \
      pandas \
      scikit-learn

WORKDIR /dataset

COPY ./script.py ./
COPY ./download.sh ./
COPY ./imdb_top_1000.csv ./

# Exec form: python3 runs as PID 1 and receives SIGTERM from `docker stop`.
CMD ["python3", "./script.py"]
|
11
Docker/download.sh
Normal file
11
Docker/download.sh
Normal file
@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
# Download the IMDB top-1000 dataset from Kaggle and unpack it into ./dataset.
#
# SECURITY NOTE(review): the original script hard-coded a Kaggle API token in
# this repository — a committed key must be treated as compromised and rotated.
# Credentials are now taken from the KAGGLE_USERNAME / KAGGLE_KEY environment
# variables; the old literals remain only as a backward-compatible fallback.
set -euo pipefail

KAGGLE_USERNAME="${KAGGLE_USERNAME:-szymonparafinski}"
KAGGLE_KEY="${KAGGLE_KEY:-a95757bcf7f0def396b5294d971bf6dd}"

# The kaggle CLI reads ~/.kaggle/kaggle.json (the original wrote ./.kaggle
# relative to the CWD, which only worked when run from $HOME). Overwrite
# instead of appending (">>" duplicated the JSON on re-runs) and restrict
# permissions to the owner, as the CLI requires.
mkdir -p "$HOME/.kaggle"
printf '{"username":"%s","key":"%s"}\n' "$KAGGLE_USERNAME" "$KAGGLE_KEY" \
  > "$HOME/.kaggle/kaggle.json"
chmod 600 "$HOME/.kaggle/kaggle.json"

# Fetch and unpack the dataset. (The original's cd chain accidentally nested
# dataset/ inside .kaggle/; it now lives under the current directory.)
mkdir -p dataset
cd dataset
kaggle datasets download -d harshitshankhdhar/imdb-dataset-of-top-1000-movies-and-tv-shows
unzip -o imdb-dataset-of-top-1000-movies-and-tv-shows.zip

# Preserved from the original: drop into an interactive shell when done.
$SHELL
|
1001
Docker/imdb_top_1000.csv
Normal file
1001
Docker/imdb_top_1000.csv
Normal file
File diff suppressed because it is too large
Load Diff
30
Docker/script.py
Normal file
30
Docker/script.py
Normal file
@ -0,0 +1,30 @@
|
||||
"""Clean the IMDB top-1000 dataset and split it into train/dev/test CSVs.

Reads ``imdb_top_1000.csv`` from the working directory and writes
``data_train.csv``, ``data_dev.csv`` and ``data_test.csv``.
"""

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

imdb_data = pd.read_csv('imdb_top_1000.csv')

# Drop columns that are not useful as features (one call instead of two).
imdb_data.drop(columns=["Poster_Link", "Overview"], inplace=True)

# Normalize every free-text column to lower case in a single loop instead of
# seven copy-pasted statements.
TEXT_COLUMNS = ["Series_Title", "Genre", "Director",
                "Star1", "Star2", "Star3", "Star4"]
for column in TEXT_COLUMNS:
    imdb_data[column] = imdb_data[column].str.lower()

# Blank out missing values, then parse Gross (formatted like "1,234,567") as a
# number. Rows whose Gross is empty or unparseable become NaN under
# errors='coerce' and are removed by the dropna() below — this is the only
# column that can still hold NaN at that point.
imdb_data = imdb_data.replace(np.nan, '', regex=True)
imdb_data["Gross"] = imdb_data["Gross"].str.replace(',', '', regex=False)
imdb_data["Gross"] = pd.to_numeric(imdb_data["Gross"], errors='coerce')
imdb_data = imdb_data.dropna()

# Fixed random_state keeps the split reproducible: 230 rows are held out, then
# half of those (115) become the dev set, leaving 115 for test.
data_train, data_test = train_test_split(imdb_data, test_size=230, random_state=1)
data_test, data_dev = train_test_split(data_test, test_size=115, random_state=1)

print("Dataset successfully divided into test/dev/train sets ")

data_test.to_csv("data_test.csv", encoding="utf-8", index=False)
data_dev.to_csv("data_dev.csv", encoding="utf-8", index=False)
data_train.to_csv("data_train.csv", encoding="utf-8", index=False)
|
Loading…
Reference in New Issue
Block a user