{ "cells": [ { "cell_type": "markdown", "metadata": { "collapsed": false }, "source": [ "Bootstrapowa wersja testu t.\n", "Implementacja powinna obejmować test dla jednej próby, dla dwóch prób niezależnych oraz dla dwóch prób zależnych.\n", "W każdej sytuacji oczekiwanym wejście jest zbiór danych w odpowiednim formacie, a wyjściem p-wartość oraz ostateczna decyzja.\n", "Dodatkowo powinien być rysowany odpowiedni rozkład statystyki testowej." ] }, { "cell_type": "markdown", "metadata": { "collapsed": false }, "source": [ "Zbiór danych - ???\n", "Hipoteza zerowa - ???\n", "Hipoteza alternatywna - ???\n", "\n", "Dla każdego z 3 testów inne\n", "https://www.jmp.com/en_ch/statistics-knowledge-portal/t-test.html" ] }, { "cell_type": "code", "execution_count": 155, "outputs": [], "source": [ "# TODO: Poprzestawiać kolejność definicji funkcji?" ], "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } } }, { "cell_type": "code", "execution_count": 156, "metadata": { "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "from math import sqrt\n", "from scipy.stats import sem\n", "from scipy.stats import t\n", "import matplotlib.pyplot as plt\n", "from statistics import mean, stdev\n", "from scipy.stats import ttest_ind, ttest_1samp, ttest_rel" ] }, { "cell_type": "code", "execution_count": 157, "metadata": { "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def generate_bootstraps(data, n_bootstraps=100):\n", " data_size = data.shape[0]\n", " for _ in range(n_bootstraps):\n", " indices = np.random.choice(len(data), size=data_size)\n", " yield data.iloc[indices, :]" ] }, { "cell_type": "code", "execution_count": 158, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def t_stat_single(sample, population_mean):\n", " \"\"\"Funkcja oblicza wartość statystyki testowej dla jednej próbki\"\"\"\n", " if sample.empty:\n", " raise Exception(\"Empty sample\")\n", " sample = sample[0].values.tolist()\n", " sample_size = len(sample)\n", " return (mean(sample) - population_mean) / (stdev(sample) / sqrt(sample_size))" ] }, { "cell_type": "code", "execution_count": 159, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def t_stat_ind(sample_1, sample_2):\n", " \"\"\"Funkcja oblicza wartość statystyki testowej dla dwóch próbek niezależnych\"\"\"\n", " if sample_1.empty or sample_2.empty:\n", " raise Exception(\"Empty sample\")\n", " sample_1 = sample_1[0].values.tolist()\n", " sample_2 = sample_2[0].values.tolist()\n", " sed = sqrt(sem(sample_1)**2 + sem(sample_2)**2)\n", " return (mean(sample_1) - mean(sample_2)) / sed" ] }, { "cell_type": "code", "execution_count": 160, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def t_stat_dep(sample_1, sample_2, mu=0):\n", " \"\"\"Funkcja oblicza wartość statystyki testowej dla dwóch próbek zależnych\"\"\"\n", " if sample_1.empty or sample_2.empty:\n", " raise Exception(\"Empty sample\")\n", " sample_1 = sample_1[0].values.tolist()\n", " sample_2 = sample_2[0].values.tolist()\n", " differences = [x_1 - x_2 for x_1, x_2 in zip(sample_1, sample_2)]\n", " sample_size = len(sample_1)\n", " return (mean(differences) - mu) / (stdev(differences) / sqrt(sample_size))" ] }, { "cell_type": "code", "execution_count": 161, "metadata": {}, "outputs": [], "source": [ "def df_dep(sample_1, sample_2):\n", " \"\"\"Funkcja oblicza stopnie swobody dla dwóch próbek 
zależnych\"\"\"\n", " l1, l2 = len(sample_1), len(sample_2)\n", " if l1 != l2:\n", " raise Exception(\"Samples aren't of equal length\")\n", " return l1" ] }, { "cell_type": "code", "execution_count": 162, "metadata": {}, "outputs": [], "source": [ "def df_ind(sample_1, sample_2):\n", " \"\"\"Funkcja oblicza stopnie swobody dla dwóch próbek niezależnych\"\"\"\n", " return len(sample_1) + len(sample_2) - 2" ] }, { "cell_type": "code", "execution_count": 163, "metadata": {}, "outputs": [], "source": [ "def df_single(sample_1):\n", " \"\"\"Funkcja oblicza stopnie swobody dla jednej próbki\"\"\"\n", " # TODO: I have no clue what to return from here\n", " return len(sample_1)" ] }, { "cell_type": "code", "execution_count": 164, "metadata": {}, "outputs": [], "source": [ "def calculate_p(t_stat, df):\n", " \"\"\"Funkcja oblicza wartość *p* na podstawie statystyki testowej i stopni swobody\"\"\"\n", " return (1.0 - t.cdf(abs(t_stat), df)) * 2.0" ] }, { "cell_type": "code", "execution_count": 165, "metadata": {}, "outputs": [], "source": [ "def calculate_cv(df, alpha=0.05):\n", " \"\"\"Funkcja oblicza wartość krytyczną (critical value)\"\"\"\n", " return t.ppf(1.0 - alpha, df)" ] }, { "cell_type": "code", "execution_count": 166, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def bootstrap_one_sample(sample, population_mean):\n", " return t_test(\n", " sample_1=sample,\n", " df_fn=df_single,\n", " t_stat_fn=t_stat_single,\n", " population_mean=population_mean\n", " )" ] }, { "cell_type": "code", "execution_count": 167, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def bootstrap_independent(sample_1, sample_2):\n", " return t_test(\n", " sample_1=sample_1,\n", " sample_2=sample_2,\n", " df_fn=df_ind,\n", " t_stat_fn=t_stat_ind\n", " )" ] }, { "cell_type": "code", "execution_count": 168, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def bootstrap_dependent(sample_1, sample_2):\n", " return t_test(\n", " sample_1=sample_1,\n", " sample_2=sample_2,\n", " df_fn=df_dep,\n", " t_stat_fn=t_stat_dep\n", " )" ] }, { "cell_type": "code", "execution_count": 169, "metadata": {}, "outputs": [], "source": [ "def get_t_stats(sample_1, sample_2=None, t_stat_fn=t_stat_single, population_mean=None):\n", " \"\"\"Funkcja oblicza listę statystyk testowych dla każdej próbki bootstrapowej wybranej na podstawie danych sample_1 i sample_2\"\"\"\n", " t_stat_list = []\n", "\n", " # One sample test\n", " if t_stat_fn==t_stat_single:\n", " if not population_mean:\n", " raise Exception(\"population_mean not provided\")\n", " for bootstrap in generate_bootstraps(sample_1):\n", " stat = t_stat_fn(bootstrap, population_mean)\n", " t_stat_list.append(stat)\n", " return t_stat_list\n", "\n", " # Two sample test\n", " for bootstrap_1, bootstrap_2 in zip(generate_bootstraps(sample_1), generate_bootstraps(sample_2)):\n", " stat = t_stat_fn(bootstrap_1, bootstrap_2)\n", " t_stat_list.append(stat)\n", " return t_stat_list" ] }, { "cell_type": "code", "execution_count": 170, "metadata": { "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def t_test(sample_1, sample_2=None, df_fn=df_single, t_stat_fn=t_stat_single, population_mean=None, alpha=0.05):\n", " \"\"\"\n", " Funkcja przeprowadza test T-studenta dla dwóch zmiennych.\n", " liczba kolumn wynosi 1, test jest przeprowadzany dla jednej zmiennej.\n", " @param df_fn - funkcja obliczająca stopnie swobody\n", " @param t_stat_fn - 
{ "cell_type": "code", "execution_count": 170, "metadata": { "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def t_test(sample_1, sample_2=None, df_fn=df_single, t_stat_fn=t_stat_single, population_mean=None, alpha=0.05):\n", "    \"\"\"\n", "    Performs a bootstrapped Student's t-test.\n", "    If only one sample is provided (sample_2 is None), a one-sample test is performed.\n", "    @param df_fn - function computing the degrees of freedom\n", "    @param t_stat_fn - function computing the test statistic\n", "    \"\"\"\n", "    t_stat_list = get_t_stats(sample_1, sample_2, t_stat_fn, population_mean=population_mean)\n", "    # The overall statistic is the mean over the bootstrap samples; dividing the sum by the\n", "    # number of data rows instead of the number of bootstrap samples gives a different result.\n", "    t_stat = mean(t_stat_list)\n", "\n", "    if sample_2 is None:\n", "        df = df_fn(sample_1)\n", "    else:\n", "        df = df_fn(sample_1, sample_2)\n", "    cv = calculate_cv(df, alpha)\n", "    p = calculate_p(t_stat, df)\n", "    return t_stat, df, cv, p, t_stat_list" ] },
{ "cell_type": "code", "execution_count": 171, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "def draw_distribution(stats):\n", "    \"\"\"\n", "    Plots the distribution of the test statistic.\n", "    @param stats: list of test statistics\n", "    \"\"\"\n", "    plt.hist(stats)\n", "    plt.xlabel('Test statistic value')\n", "    plt.ylabel('Frequency')\n", "    plt.show()" ] },
{ "cell_type": "code", "execution_count": 172, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Test statistic for one sample:\n", "1.414213562373095 - from our function\n", "[1.41421356] - from the library implementation\n", "\n", "Test statistic for two independent samples:\n", "-3.0 - from our function\n", "[-3.] - from the library implementation\n", "\n", "Test statistic for two dependent samples:\n", "-1.6329931618554525 - from our function\n", "[-1.63299316] - from the library implementation\n", "\n" ] } ], "source": [ "# Tests for the test statistics alone\n", "def pretty_print_stats(t_stat_selfmade, t_stat_lib, suffix):\n", "    print(f'Test statistic for {suffix}:')\n", "    print(t_stat_selfmade, '- from our function')\n", "    print(t_stat_lib, '- from the library implementation')\n", "    print()\n", "\n", "dummy = pd.DataFrame([1, 2, 3, 4, 5])\n", "dummy2 = pd.DataFrame([4, 5, 6, 7, 8])\n", "dummy3 = pd.DataFrame([1, 3, 3, 4, 6])\n", "\n", "t_stat_selfmade = t_stat_single(dummy, 2)\n", "t_stat_lib, _ = ttest_1samp(dummy, 2)\n", "pretty_print_stats(t_stat_selfmade, t_stat_lib, 'one sample')\n", "\n", "t_stat_selfmade = t_stat_ind(dummy, dummy2)\n", "t_stat_lib, _ = ttest_ind(dummy, dummy2)\n", "pretty_print_stats(t_stat_selfmade, t_stat_lib, 'two independent samples')\n", "\n", "t_stat_selfmade = t_stat_dep(dummy, dummy3)\n", "t_stat_lib, _ = ttest_rel(dummy, dummy3)\n", "pretty_print_stats(t_stat_selfmade, t_stat_lib, 'two dependent samples')" ] },
{ "cell_type": "code", "execution_count": 173, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Statistics for one sample:\n", "t: 1.8524997668616348, df: 5, cv: 2.015048372669157, p: 0.12315232406912302\n", "\n", "Statistics for two dependent samples:\n", "t: 3.166992562129946, df: 5, cv: 2.015048372669157, p: 0.02489883191814224\n", "\n", "Statistics for two independent samples:\n", "t: 3.0429202631473986, df: 8, cv: 1.8595480375228421, p: 0.015992147409949586\n", "\n" ] } ], "source": [ "# Tests with bootstrapping\n", "\n", "def pretty_print_full_stats(t_stat, df, cv, p):\n", "    print(f't: {t_stat}, df: {df}, cv: {cv}, p: {p}\\n')\n", "\n", "print('Statistics for one sample:')\n", "t_stat, df, cv, p, _ = bootstrap_one_sample(dummy, 2)\n", "pretty_print_full_stats(t_stat, df, cv, p)\n", "\n", "print('Statistics for two dependent samples:')\n", "t_stat, df, cv, p, _ = bootstrap_dependent(dummy2, dummy3)\n", "pretty_print_full_stats(t_stat, df, cv, p)\n", "\n", "print('Statistics for two independent samples:')\n", "t_stat, df, cv, p, _ = bootstrap_independent(dummy2, dummy3)\n", "pretty_print_full_stats(t_stat, df, cv, p)" ] },
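{ "cell_type": "markdown", "metadata": {}, "source": [ "The task description also asks for the distribution of the test statistic to be plotted. A minimal example using the helpers defined above (it reuses the toy `dummy` data, so the plot is only illustrative):" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Plot the bootstrap distribution of the one-sample test statistic for the toy data\n", "_, _, _, _, t_stat_list = bootstrap_one_sample(dummy, 2)\n", "draw_distribution(t_stat_list)" ] },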
"\n", "print('Statystyki dla dwóch prób niezależnych:')\n", "t_stat, df, cv, p, _ = bootstrap_independent(dummy2, dummy3)\n", "pretty_print_full_stats(t_stat, df, cv, p)" ] }, { "cell_type": "code", "execution_count": 174, "metadata": { "collapsed": false, "pycharm": { "name": "#%%\n" } }, "outputs": [], "source": [ "dataset = pd.read_csv('experiment_data.csv')\n", "#make_decision(dataset, ['Weight', 'Age'])" ] } ], "metadata": { "interpreter": { "hash": "11938c6bc6919ae2720b4d5011047913343b08a43b18698fd82dedb0d4417594" }, "kernelspec": { "display_name": "Python 3.9.1 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.1" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }