This commit is contained in:
Jakub Henyk 2023-05-11 21:49:21 +02:00
parent 63d63f0522
commit 4e0e16db4b
16 changed files with 5421 additions and 0 deletions

5
my_runs/5/config.json Normal file
View File

@ -0,0 +1,5 @@
{
"epochs": 10,
"learning_rate": 0.001,
"seed": 102385107
}

800
my_runs/5/cout.txt Normal file
View File

@ -0,0 +1,800 @@
tensor(1.9501, grad_fn=<NllLossBackward>)
tensor(2.2223, grad_fn=<NllLossBackward>)
tensor(2.0121, grad_fn=<NllLossBackward>)
tensor(2.0934, grad_fn=<NllLossBackward>)
tensor(2.1164, grad_fn=<NllLossBackward>)
tensor(1.7837, grad_fn=<NllLossBackward>)
tensor(1.8654, grad_fn=<NllLossBackward>)
tensor(1.8964, grad_fn=<NllLossBackward>)
tensor(2.0159, grad_fn=<NllLossBackward>)
tensor(2.0181, grad_fn=<NllLossBackward>)
tensor(1.8816, grad_fn=<NllLossBackward>)
tensor(2.0737, grad_fn=<NllLossBackward>)
tensor(1.9732, grad_fn=<NllLossBackward>)
tensor(2.1428, grad_fn=<NllLossBackward>)
tensor(1.7334, grad_fn=<NllLossBackward>)
tensor(1.9861, grad_fn=<NllLossBackward>)
tensor(1.7247, grad_fn=<NllLossBackward>)
tensor(1.9532, grad_fn=<NllLossBackward>)
tensor(2.0621, grad_fn=<NllLossBackward>)
tensor(1.8170, grad_fn=<NllLossBackward>)
tensor(1.9526, grad_fn=<NllLossBackward>)
tensor(1.9936, grad_fn=<NllLossBackward>)
tensor(1.9125, grad_fn=<NllLossBackward>)
tensor(1.8121, grad_fn=<NllLossBackward>)
tensor(1.9899, grad_fn=<NllLossBackward>)
tensor(2.1121, grad_fn=<NllLossBackward>)
tensor(2.0649, grad_fn=<NllLossBackward>)
tensor(1.9243, grad_fn=<NllLossBackward>)
tensor(1.8116, grad_fn=<NllLossBackward>)
tensor(1.7532, grad_fn=<NllLossBackward>)
tensor(1.7859, grad_fn=<NllLossBackward>)
tensor(1.5712, grad_fn=<NllLossBackward>)
tensor(1.8279, grad_fn=<NllLossBackward>)
tensor(1.7469, grad_fn=<NllLossBackward>)
tensor(1.7512, grad_fn=<NllLossBackward>)
tensor(1.6985, grad_fn=<NllLossBackward>)
tensor(1.7779, grad_fn=<NllLossBackward>)
tensor(1.8746, grad_fn=<NllLossBackward>)
tensor(1.8326, grad_fn=<NllLossBackward>)
tensor(1.7284, grad_fn=<NllLossBackward>)
tensor(1.5637, grad_fn=<NllLossBackward>)
tensor(1.7761, grad_fn=<NllLossBackward>)
tensor(1.7882, grad_fn=<NllLossBackward>)
tensor(1.7751, grad_fn=<NllLossBackward>)
tensor(1.7592, grad_fn=<NllLossBackward>)
tensor(1.8005, grad_fn=<NllLossBackward>)
tensor(1.7046, grad_fn=<NllLossBackward>)
tensor(1.6825, grad_fn=<NllLossBackward>)
tensor(1.6354, grad_fn=<NllLossBackward>)
tensor(1.6500, grad_fn=<NllLossBackward>)
tensor(1.5438, grad_fn=<NllLossBackward>)
tensor(1.6757, grad_fn=<NllLossBackward>)
tensor(1.5822, grad_fn=<NllLossBackward>)
tensor(1.7962, grad_fn=<NllLossBackward>)
tensor(1.7180, grad_fn=<NllLossBackward>)
tensor(1.7428, grad_fn=<NllLossBackward>)
tensor(1.5483, grad_fn=<NllLossBackward>)
tensor(1.5469, grad_fn=<NllLossBackward>)
tensor(1.5246, grad_fn=<NllLossBackward>)
tensor(1.4950, grad_fn=<NllLossBackward>)
tensor(1.4438, grad_fn=<NllLossBackward>)
tensor(1.5273, grad_fn=<NllLossBackward>)
tensor(1.5544, grad_fn=<NllLossBackward>)
tensor(1.4241, grad_fn=<NllLossBackward>)
tensor(1.4795, grad_fn=<NllLossBackward>)
tensor(1.6521, grad_fn=<NllLossBackward>)
tensor(1.5346, grad_fn=<NllLossBackward>)
tensor(1.5802, grad_fn=<NllLossBackward>)
tensor(1.5093, grad_fn=<NllLossBackward>)
tensor(1.6289, grad_fn=<NllLossBackward>)
tensor(1.5276, grad_fn=<NllLossBackward>)
tensor(1.4607, grad_fn=<NllLossBackward>)
tensor(1.4889, grad_fn=<NllLossBackward>)
tensor(1.5309, grad_fn=<NllLossBackward>)
tensor(1.5775, grad_fn=<NllLossBackward>)
tensor(1.5067, grad_fn=<NllLossBackward>)
tensor(1.5720, grad_fn=<NllLossBackward>)
tensor(1.2021, grad_fn=<NllLossBackward>)
tensor(1.4729, grad_fn=<NllLossBackward>)
tensor(1.5119, grad_fn=<NllLossBackward>)
tensor(1.5914, grad_fn=<NllLossBackward>)
tensor(1.5150, grad_fn=<NllLossBackward>)
tensor(1.5267, grad_fn=<NllLossBackward>)
tensor(1.4954, grad_fn=<NllLossBackward>)
tensor(1.4353, grad_fn=<NllLossBackward>)
tensor(1.4683, grad_fn=<NllLossBackward>)
tensor(1.4718, grad_fn=<NllLossBackward>)
tensor(1.5055, grad_fn=<NllLossBackward>)
tensor(1.4560, grad_fn=<NllLossBackward>)
tensor(1.4269, grad_fn=<NllLossBackward>)
tensor(1.3962, grad_fn=<NllLossBackward>)
tensor(1.4041, grad_fn=<NllLossBackward>)
tensor(1.3641, grad_fn=<NllLossBackward>)
tensor(1.4375, grad_fn=<NllLossBackward>)
tensor(1.3466, grad_fn=<NllLossBackward>)
tensor(1.3901, grad_fn=<NllLossBackward>)
tensor(1.3942, grad_fn=<NllLossBackward>)
tensor(1.3253, grad_fn=<NllLossBackward>)
tensor(1.3589, grad_fn=<NllLossBackward>)
tensor(1.2713, grad_fn=<NllLossBackward>)
tensor(1.3262, grad_fn=<NllLossBackward>)
tensor(1.3965, grad_fn=<NllLossBackward>)
tensor(1.3821, grad_fn=<NllLossBackward>)
tensor(1.3932, grad_fn=<NllLossBackward>)
tensor(1.2758, grad_fn=<NllLossBackward>)
tensor(1.3762, grad_fn=<NllLossBackward>)
tensor(1.3881, grad_fn=<NllLossBackward>)
tensor(1.3043, grad_fn=<NllLossBackward>)
tensor(1.2947, grad_fn=<NllLossBackward>)
tensor(1.3123, grad_fn=<NllLossBackward>)
tensor(1.4206, grad_fn=<NllLossBackward>)
tensor(1.4382, grad_fn=<NllLossBackward>)
tensor(1.3018, grad_fn=<NllLossBackward>)
tensor(1.3518, grad_fn=<NllLossBackward>)
tensor(1.3169, grad_fn=<NllLossBackward>)
tensor(1.4328, grad_fn=<NllLossBackward>)
tensor(1.1904, grad_fn=<NllLossBackward>)
tensor(1.3218, grad_fn=<NllLossBackward>)
tensor(1.4076, grad_fn=<NllLossBackward>)
tensor(1.3104, grad_fn=<NllLossBackward>)
tensor(1.3910, grad_fn=<NllLossBackward>)
tensor(1.2078, grad_fn=<NllLossBackward>)
tensor(1.3289, grad_fn=<NllLossBackward>)
tensor(1.2552, grad_fn=<NllLossBackward>)
tensor(1.2702, grad_fn=<NllLossBackward>)
tensor(1.2306, grad_fn=<NllLossBackward>)
tensor(1.2207, grad_fn=<NllLossBackward>)
tensor(1.3377, grad_fn=<NllLossBackward>)
tensor(1.2046, grad_fn=<NllLossBackward>)
tensor(1.2382, grad_fn=<NllLossBackward>)
tensor(1.1590, grad_fn=<NllLossBackward>)
tensor(1.1479, grad_fn=<NllLossBackward>)
tensor(1.1792, grad_fn=<NllLossBackward>)
tensor(1.1465, grad_fn=<NllLossBackward>)
tensor(1.2166, grad_fn=<NllLossBackward>)
tensor(1.1696, grad_fn=<NllLossBackward>)
tensor(1.1821, grad_fn=<NllLossBackward>)
tensor(1.2314, grad_fn=<NllLossBackward>)
tensor(1.2756, grad_fn=<NllLossBackward>)
tensor(1.2671, grad_fn=<NllLossBackward>)
tensor(1.2991, grad_fn=<NllLossBackward>)
tensor(1.2198, grad_fn=<NllLossBackward>)
tensor(1.3112, grad_fn=<NllLossBackward>)
tensor(1.2875, grad_fn=<NllLossBackward>)
tensor(1.1222, grad_fn=<NllLossBackward>)
tensor(1.2184, grad_fn=<NllLossBackward>)
tensor(1.1955, grad_fn=<NllLossBackward>)
tensor(1.1467, grad_fn=<NllLossBackward>)
tensor(1.2610, grad_fn=<NllLossBackward>)
tensor(1.2439, grad_fn=<NllLossBackward>)
tensor(1.0732, grad_fn=<NllLossBackward>)
tensor(1.0671, grad_fn=<NllLossBackward>)
tensor(1.1094, grad_fn=<NllLossBackward>)
tensor(1.1906, grad_fn=<NllLossBackward>)
tensor(1.2624, grad_fn=<NllLossBackward>)
tensor(1.2240, grad_fn=<NllLossBackward>)
tensor(1.2576, grad_fn=<NllLossBackward>)
tensor(1.1523, grad_fn=<NllLossBackward>)
tensor(1.0610, grad_fn=<NllLossBackward>)
tensor(1.1444, grad_fn=<NllLossBackward>)
tensor(1.0518, grad_fn=<NllLossBackward>)
tensor(1.1127, grad_fn=<NllLossBackward>)
tensor(1.0876, grad_fn=<NllLossBackward>)
tensor(1.0053, grad_fn=<NllLossBackward>)
tensor(1.1154, grad_fn=<NllLossBackward>)
tensor(1.0077, grad_fn=<NllLossBackward>)
tensor(1.1619, grad_fn=<NllLossBackward>)
tensor(1.1994, grad_fn=<NllLossBackward>)
tensor(1.2479, grad_fn=<NllLossBackward>)
tensor(0.9788, grad_fn=<NllLossBackward>)
tensor(1.0385, grad_fn=<NllLossBackward>)
tensor(1.1875, grad_fn=<NllLossBackward>)
tensor(1.1982, grad_fn=<NllLossBackward>)
tensor(1.1858, grad_fn=<NllLossBackward>)
tensor(1.1008, grad_fn=<NllLossBackward>)
tensor(1.0502, grad_fn=<NllLossBackward>)
tensor(1.0356, grad_fn=<NllLossBackward>)
tensor(1.0291, grad_fn=<NllLossBackward>)
tensor(1.0144, grad_fn=<NllLossBackward>)
tensor(1.2293, grad_fn=<NllLossBackward>)
tensor(1.2292, grad_fn=<NllLossBackward>)
tensor(1.1344, grad_fn=<NllLossBackward>)
tensor(1.0412, grad_fn=<NllLossBackward>)
tensor(1.1474, grad_fn=<NllLossBackward>)
tensor(0.9736, grad_fn=<NllLossBackward>)
tensor(1.0998, grad_fn=<NllLossBackward>)
tensor(1.2037, grad_fn=<NllLossBackward>)
tensor(1.1035, grad_fn=<NllLossBackward>)
tensor(0.9991, grad_fn=<NllLossBackward>)
tensor(1.0947, grad_fn=<NllLossBackward>)
tensor(1.0100, grad_fn=<NllLossBackward>)
tensor(1.0542, grad_fn=<NllLossBackward>)
tensor(1.1516, grad_fn=<NllLossBackward>)
tensor(1.0869, grad_fn=<NllLossBackward>)
tensor(1.1072, grad_fn=<NllLossBackward>)
tensor(1.0855, grad_fn=<NllLossBackward>)
tensor(1.0719, grad_fn=<NllLossBackward>)
tensor(1.0497, grad_fn=<NllLossBackward>)
tensor(1.0355, grad_fn=<NllLossBackward>)
tensor(1.0872, grad_fn=<NllLossBackward>)
tensor(1.0117, grad_fn=<NllLossBackward>)
tensor(0.9536, grad_fn=<NllLossBackward>)
tensor(1.1327, grad_fn=<NllLossBackward>)
tensor(0.8925, grad_fn=<NllLossBackward>)
tensor(1.1434, grad_fn=<NllLossBackward>)
tensor(1.0641, grad_fn=<NllLossBackward>)
tensor(1.1626, grad_fn=<NllLossBackward>)
tensor(1.1528, grad_fn=<NllLossBackward>)
tensor(1.3020, grad_fn=<NllLossBackward>)
tensor(0.9304, grad_fn=<NllLossBackward>)
tensor(1.1756, grad_fn=<NllLossBackward>)
tensor(1.0326, grad_fn=<NllLossBackward>)
tensor(1.1883, grad_fn=<NllLossBackward>)
tensor(0.9641, grad_fn=<NllLossBackward>)
tensor(1.1524, grad_fn=<NllLossBackward>)
tensor(1.1084, grad_fn=<NllLossBackward>)
tensor(0.9375, grad_fn=<NllLossBackward>)
tensor(1.1257, grad_fn=<NllLossBackward>)
tensor(0.9931, grad_fn=<NllLossBackward>)
tensor(0.9661, grad_fn=<NllLossBackward>)
tensor(1.0908, grad_fn=<NllLossBackward>)
tensor(1.2453, grad_fn=<NllLossBackward>)
tensor(1.0393, grad_fn=<NllLossBackward>)
tensor(1.0218, grad_fn=<NllLossBackward>)
tensor(0.9613, grad_fn=<NllLossBackward>)
tensor(0.9365, grad_fn=<NllLossBackward>)
tensor(1.1133, grad_fn=<NllLossBackward>)
tensor(1.0401, grad_fn=<NllLossBackward>)
tensor(0.9598, grad_fn=<NllLossBackward>)
tensor(0.9377, grad_fn=<NllLossBackward>)
tensor(1.0843, grad_fn=<NllLossBackward>)
tensor(0.9690, grad_fn=<NllLossBackward>)
tensor(0.9090, grad_fn=<NllLossBackward>)
tensor(0.8713, grad_fn=<NllLossBackward>)
tensor(1.1776, grad_fn=<NllLossBackward>)
tensor(1.0289, grad_fn=<NllLossBackward>)
tensor(1.1626, grad_fn=<NllLossBackward>)
tensor(1.0496, grad_fn=<NllLossBackward>)
tensor(1.0960, grad_fn=<NllLossBackward>)
tensor(1.0211, grad_fn=<NllLossBackward>)
tensor(1.0563, grad_fn=<NllLossBackward>)
tensor(1.1636, grad_fn=<NllLossBackward>)
tensor(1.1681, grad_fn=<NllLossBackward>)
tensor(1.0832, grad_fn=<NllLossBackward>)
tensor(1.0135, grad_fn=<NllLossBackward>)
tensor(0.8882, grad_fn=<NllLossBackward>)
tensor(0.9941, grad_fn=<NllLossBackward>)
tensor(1.1249, grad_fn=<NllLossBackward>)
tensor(0.9753, grad_fn=<NllLossBackward>)
tensor(1.0903, grad_fn=<NllLossBackward>)
tensor(0.9399, grad_fn=<NllLossBackward>)
tensor(1.1707, grad_fn=<NllLossBackward>)
tensor(1.0738, grad_fn=<NllLossBackward>)
tensor(1.0737, grad_fn=<NllLossBackward>)
tensor(0.9548, grad_fn=<NllLossBackward>)
tensor(1.0821, grad_fn=<NllLossBackward>)
tensor(1.0017, grad_fn=<NllLossBackward>)
tensor(0.9806, grad_fn=<NllLossBackward>)
tensor(0.9482, grad_fn=<NllLossBackward>)
tensor(0.9815, grad_fn=<NllLossBackward>)
tensor(0.9502, grad_fn=<NllLossBackward>)
tensor(1.0242, grad_fn=<NllLossBackward>)
tensor(0.9225, grad_fn=<NllLossBackward>)
tensor(0.8997, grad_fn=<NllLossBackward>)
tensor(0.9809, grad_fn=<NllLossBackward>)
tensor(1.0378, grad_fn=<NllLossBackward>)
tensor(0.9639, grad_fn=<NllLossBackward>)
tensor(0.8917, grad_fn=<NllLossBackward>)
tensor(0.8864, grad_fn=<NllLossBackward>)
tensor(0.9497, grad_fn=<NllLossBackward>)
tensor(1.0174, grad_fn=<NllLossBackward>)
tensor(1.0068, grad_fn=<NllLossBackward>)
tensor(0.8840, grad_fn=<NllLossBackward>)
tensor(1.0105, grad_fn=<NllLossBackward>)
tensor(0.9752, grad_fn=<NllLossBackward>)
tensor(0.9361, grad_fn=<NllLossBackward>)
tensor(1.1132, grad_fn=<NllLossBackward>)
tensor(0.9312, grad_fn=<NllLossBackward>)
tensor(0.7768, grad_fn=<NllLossBackward>)
tensor(0.8946, grad_fn=<NllLossBackward>)
tensor(0.9688, grad_fn=<NllLossBackward>)
tensor(0.8929, grad_fn=<NllLossBackward>)
tensor(0.9761, grad_fn=<NllLossBackward>)
tensor(0.7978, grad_fn=<NllLossBackward>)
tensor(0.8562, grad_fn=<NllLossBackward>)
tensor(1.0388, grad_fn=<NllLossBackward>)
tensor(0.9373, grad_fn=<NllLossBackward>)
tensor(0.9336, grad_fn=<NllLossBackward>)
tensor(1.0948, grad_fn=<NllLossBackward>)
tensor(0.9178, grad_fn=<NllLossBackward>)
tensor(0.7934, grad_fn=<NllLossBackward>)
tensor(1.1194, grad_fn=<NllLossBackward>)
tensor(0.6673, grad_fn=<NllLossBackward>)
tensor(0.9399, grad_fn=<NllLossBackward>)
tensor(0.9574, grad_fn=<NllLossBackward>)
tensor(0.9509, grad_fn=<NllLossBackward>)
tensor(0.8586, grad_fn=<NllLossBackward>)
tensor(1.1297, grad_fn=<NllLossBackward>)
tensor(0.8019, grad_fn=<NllLossBackward>)
tensor(0.8081, grad_fn=<NllLossBackward>)
tensor(0.9387, grad_fn=<NllLossBackward>)
tensor(1.0109, grad_fn=<NllLossBackward>)
tensor(1.0592, grad_fn=<NllLossBackward>)
tensor(0.8778, grad_fn=<NllLossBackward>)
tensor(0.7532, grad_fn=<NllLossBackward>)
tensor(0.9831, grad_fn=<NllLossBackward>)
tensor(0.9295, grad_fn=<NllLossBackward>)
tensor(0.9273, grad_fn=<NllLossBackward>)
tensor(0.7610, grad_fn=<NllLossBackward>)
tensor(0.9485, grad_fn=<NllLossBackward>)
tensor(0.7574, grad_fn=<NllLossBackward>)
tensor(0.8195, grad_fn=<NllLossBackward>)
tensor(1.0316, grad_fn=<NllLossBackward>)
tensor(0.8862, grad_fn=<NllLossBackward>)
tensor(0.8881, grad_fn=<NllLossBackward>)
tensor(0.8846, grad_fn=<NllLossBackward>)
tensor(0.9182, grad_fn=<NllLossBackward>)
tensor(0.8645, grad_fn=<NllLossBackward>)
tensor(0.8543, grad_fn=<NllLossBackward>)
tensor(0.9158, grad_fn=<NllLossBackward>)
tensor(0.8752, grad_fn=<NllLossBackward>)
tensor(0.8406, grad_fn=<NllLossBackward>)
tensor(1.0119, grad_fn=<NllLossBackward>)
tensor(0.8996, grad_fn=<NllLossBackward>)
tensor(0.9459, grad_fn=<NllLossBackward>)
tensor(0.7835, grad_fn=<NllLossBackward>)
tensor(0.8999, grad_fn=<NllLossBackward>)
tensor(0.8183, grad_fn=<NllLossBackward>)
tensor(1.0095, grad_fn=<NllLossBackward>)
tensor(1.0225, grad_fn=<NllLossBackward>)
tensor(1.0782, grad_fn=<NllLossBackward>)
tensor(0.7733, grad_fn=<NllLossBackward>)
tensor(1.0321, grad_fn=<NllLossBackward>)
tensor(1.0843, grad_fn=<NllLossBackward>)
tensor(0.9460, grad_fn=<NllLossBackward>)
tensor(0.9670, grad_fn=<NllLossBackward>)
tensor(0.6868, grad_fn=<NllLossBackward>)
tensor(0.7530, grad_fn=<NllLossBackward>)
tensor(0.9524, grad_fn=<NllLossBackward>)
tensor(1.0273, grad_fn=<NllLossBackward>)
tensor(0.9708, grad_fn=<NllLossBackward>)
tensor(0.9831, grad_fn=<NllLossBackward>)
tensor(0.8516, grad_fn=<NllLossBackward>)
tensor(0.7161, grad_fn=<NllLossBackward>)
tensor(0.8612, grad_fn=<NllLossBackward>)
tensor(0.8369, grad_fn=<NllLossBackward>)
tensor(0.9160, grad_fn=<NllLossBackward>)
tensor(1.0154, grad_fn=<NllLossBackward>)
tensor(0.9693, grad_fn=<NllLossBackward>)
tensor(0.8537, grad_fn=<NllLossBackward>)
tensor(1.0123, grad_fn=<NllLossBackward>)
tensor(0.8139, grad_fn=<NllLossBackward>)
tensor(0.7824, grad_fn=<NllLossBackward>)
tensor(0.9480, grad_fn=<NllLossBackward>)
tensor(0.6689, grad_fn=<NllLossBackward>)
tensor(0.6494, grad_fn=<NllLossBackward>)
tensor(0.8174, grad_fn=<NllLossBackward>)
tensor(1.1185, grad_fn=<NllLossBackward>)
tensor(0.9023, grad_fn=<NllLossBackward>)
tensor(0.6259, grad_fn=<NllLossBackward>)
tensor(0.9378, grad_fn=<NllLossBackward>)
tensor(0.9920, grad_fn=<NllLossBackward>)
tensor(0.8174, grad_fn=<NllLossBackward>)
tensor(1.0048, grad_fn=<NllLossBackward>)
tensor(0.7076, grad_fn=<NllLossBackward>)
tensor(1.0242, grad_fn=<NllLossBackward>)
tensor(0.8934, grad_fn=<NllLossBackward>)
tensor(0.9217, grad_fn=<NllLossBackward>)
tensor(0.6871, grad_fn=<NllLossBackward>)
tensor(0.7047, grad_fn=<NllLossBackward>)
tensor(1.0242, grad_fn=<NllLossBackward>)
tensor(0.8955, grad_fn=<NllLossBackward>)
tensor(0.9663, grad_fn=<NllLossBackward>)
tensor(1.0107, grad_fn=<NllLossBackward>)
tensor(0.7729, grad_fn=<NllLossBackward>)
tensor(1.0800, grad_fn=<NllLossBackward>)
tensor(0.7442, grad_fn=<NllLossBackward>)
tensor(1.0098, grad_fn=<NllLossBackward>)
tensor(0.7588, grad_fn=<NllLossBackward>)
tensor(1.2068, grad_fn=<NllLossBackward>)
tensor(0.7489, grad_fn=<NllLossBackward>)
tensor(0.9795, grad_fn=<NllLossBackward>)
tensor(0.9577, grad_fn=<NllLossBackward>)
tensor(1.0788, grad_fn=<NllLossBackward>)
tensor(0.9159, grad_fn=<NllLossBackward>)
tensor(0.8914, grad_fn=<NllLossBackward>)
tensor(0.8127, grad_fn=<NllLossBackward>)
tensor(0.7898, grad_fn=<NllLossBackward>)
tensor(1.0606, grad_fn=<NllLossBackward>)
tensor(1.0155, grad_fn=<NllLossBackward>)
tensor(0.7712, grad_fn=<NllLossBackward>)
tensor(0.6497, grad_fn=<NllLossBackward>)
tensor(1.0057, grad_fn=<NllLossBackward>)
tensor(0.9467, grad_fn=<NllLossBackward>)
tensor(0.9401, grad_fn=<NllLossBackward>)
tensor(0.7897, grad_fn=<NllLossBackward>)
tensor(0.7482, grad_fn=<NllLossBackward>)
tensor(0.9641, grad_fn=<NllLossBackward>)
tensor(1.0554, grad_fn=<NllLossBackward>)
tensor(1.0126, grad_fn=<NllLossBackward>)
tensor(0.8410, grad_fn=<NllLossBackward>)
tensor(1.0382, grad_fn=<NllLossBackward>)
tensor(0.7110, grad_fn=<NllLossBackward>)
tensor(1.0870, grad_fn=<NllLossBackward>)
tensor(0.8825, grad_fn=<NllLossBackward>)
tensor(0.7345, grad_fn=<NllLossBackward>)
tensor(0.7696, grad_fn=<NllLossBackward>)
tensor(1.1410, grad_fn=<NllLossBackward>)
tensor(0.7742, grad_fn=<NllLossBackward>)
tensor(0.9176, grad_fn=<NllLossBackward>)
tensor(1.0155, grad_fn=<NllLossBackward>)
tensor(1.0921, grad_fn=<NllLossBackward>)
tensor(0.9623, grad_fn=<NllLossBackward>)
tensor(1.0362, grad_fn=<NllLossBackward>)
tensor(0.8444, grad_fn=<NllLossBackward>)
tensor(0.7857, grad_fn=<NllLossBackward>)
tensor(1.0489, grad_fn=<NllLossBackward>)
tensor(1.0134, grad_fn=<NllLossBackward>)
tensor(0.8765, grad_fn=<NllLossBackward>)
tensor(0.7957, grad_fn=<NllLossBackward>)
tensor(0.8286, grad_fn=<NllLossBackward>)
tensor(0.9569, grad_fn=<NllLossBackward>)
tensor(0.9436, grad_fn=<NllLossBackward>)
tensor(1.0643, grad_fn=<NllLossBackward>)
tensor(0.7210, grad_fn=<NllLossBackward>)
tensor(1.1423, grad_fn=<NllLossBackward>)
tensor(0.6993, grad_fn=<NllLossBackward>)
tensor(1.1189, grad_fn=<NllLossBackward>)
tensor(0.7113, grad_fn=<NllLossBackward>)
tensor(1.0083, grad_fn=<NllLossBackward>)
tensor(0.8830, grad_fn=<NllLossBackward>)
tensor(0.8819, grad_fn=<NllLossBackward>)
tensor(0.6633, grad_fn=<NllLossBackward>)
tensor(0.9553, grad_fn=<NllLossBackward>)
tensor(0.8249, grad_fn=<NllLossBackward>)
tensor(0.8438, grad_fn=<NllLossBackward>)
tensor(1.0958, grad_fn=<NllLossBackward>)
tensor(0.8441, grad_fn=<NllLossBackward>)
tensor(0.8337, grad_fn=<NllLossBackward>)
tensor(0.8428, grad_fn=<NllLossBackward>)
tensor(0.7226, grad_fn=<NllLossBackward>)
tensor(0.7742, grad_fn=<NllLossBackward>)
tensor(0.8664, grad_fn=<NllLossBackward>)
tensor(0.8543, grad_fn=<NllLossBackward>)
tensor(0.9375, grad_fn=<NllLossBackward>)
tensor(0.9353, grad_fn=<NllLossBackward>)
tensor(0.9261, grad_fn=<NllLossBackward>)
tensor(0.6823, grad_fn=<NllLossBackward>)
tensor(0.9306, grad_fn=<NllLossBackward>)
tensor(0.7585, grad_fn=<NllLossBackward>)
tensor(0.7547, grad_fn=<NllLossBackward>)
tensor(0.8512, grad_fn=<NllLossBackward>)
tensor(0.7969, grad_fn=<NllLossBackward>)
tensor(0.8188, grad_fn=<NllLossBackward>)
tensor(0.8881, grad_fn=<NllLossBackward>)
tensor(0.7456, grad_fn=<NllLossBackward>)
tensor(0.7876, grad_fn=<NllLossBackward>)
tensor(0.8903, grad_fn=<NllLossBackward>)
tensor(0.9978, grad_fn=<NllLossBackward>)
tensor(0.7180, grad_fn=<NllLossBackward>)
tensor(0.7099, grad_fn=<NllLossBackward>)
tensor(0.9319, grad_fn=<NllLossBackward>)
tensor(1.0009, grad_fn=<NllLossBackward>)
tensor(1.0830, grad_fn=<NllLossBackward>)
tensor(0.9695, grad_fn=<NllLossBackward>)
tensor(0.8626, grad_fn=<NllLossBackward>)
tensor(0.8921, grad_fn=<NllLossBackward>)
tensor(0.6693, grad_fn=<NllLossBackward>)
tensor(0.9020, grad_fn=<NllLossBackward>)
tensor(0.7643, grad_fn=<NllLossBackward>)
tensor(0.7539, grad_fn=<NllLossBackward>)
tensor(0.9065, grad_fn=<NllLossBackward>)
tensor(1.0999, grad_fn=<NllLossBackward>)
tensor(0.7682, grad_fn=<NllLossBackward>)
tensor(0.8437, grad_fn=<NllLossBackward>)
tensor(0.8154, grad_fn=<NllLossBackward>)
tensor(0.9244, grad_fn=<NllLossBackward>)
tensor(0.8746, grad_fn=<NllLossBackward>)
tensor(0.9351, grad_fn=<NllLossBackward>)
tensor(0.8342, grad_fn=<NllLossBackward>)
tensor(0.7453, grad_fn=<NllLossBackward>)
tensor(0.7781, grad_fn=<NllLossBackward>)
tensor(0.9365, grad_fn=<NllLossBackward>)
tensor(0.8738, grad_fn=<NllLossBackward>)
tensor(0.7135, grad_fn=<NllLossBackward>)
tensor(0.6970, grad_fn=<NllLossBackward>)
tensor(0.9434, grad_fn=<NllLossBackward>)
tensor(0.8135, grad_fn=<NllLossBackward>)
tensor(0.8841, grad_fn=<NllLossBackward>)
tensor(0.7391, grad_fn=<NllLossBackward>)
tensor(0.9001, grad_fn=<NllLossBackward>)
tensor(0.9221, grad_fn=<NllLossBackward>)
tensor(0.7609, grad_fn=<NllLossBackward>)
tensor(0.8406, grad_fn=<NllLossBackward>)
tensor(0.8932, grad_fn=<NllLossBackward>)
tensor(0.9226, grad_fn=<NllLossBackward>)
tensor(0.8693, grad_fn=<NllLossBackward>)
tensor(0.7715, grad_fn=<NllLossBackward>)
tensor(0.8608, grad_fn=<NllLossBackward>)
tensor(0.7173, grad_fn=<NllLossBackward>)
tensor(0.9610, grad_fn=<NllLossBackward>)
tensor(0.9863, grad_fn=<NllLossBackward>)
tensor(0.8757, grad_fn=<NllLossBackward>)
tensor(0.6149, grad_fn=<NllLossBackward>)
tensor(0.7463, grad_fn=<NllLossBackward>)
tensor(0.6069, grad_fn=<NllLossBackward>)
tensor(0.7986, grad_fn=<NllLossBackward>)
tensor(0.9223, grad_fn=<NllLossBackward>)
tensor(0.6822, grad_fn=<NllLossBackward>)
tensor(1.0679, grad_fn=<NllLossBackward>)
tensor(0.9549, grad_fn=<NllLossBackward>)
tensor(0.7737, grad_fn=<NllLossBackward>)
tensor(0.8752, grad_fn=<NllLossBackward>)
tensor(0.8067, grad_fn=<NllLossBackward>)
tensor(0.9005, grad_fn=<NllLossBackward>)
tensor(0.6203, grad_fn=<NllLossBackward>)
tensor(0.8862, grad_fn=<NllLossBackward>)
tensor(0.9147, grad_fn=<NllLossBackward>)
tensor(0.8350, grad_fn=<NllLossBackward>)
tensor(0.9818, grad_fn=<NllLossBackward>)
tensor(0.8729, grad_fn=<NllLossBackward>)
tensor(0.8714, grad_fn=<NllLossBackward>)
tensor(0.7541, grad_fn=<NllLossBackward>)
tensor(0.7650, grad_fn=<NllLossBackward>)
tensor(0.7602, grad_fn=<NllLossBackward>)
tensor(0.6429, grad_fn=<NllLossBackward>)
tensor(0.7672, grad_fn=<NllLossBackward>)
tensor(0.7339, grad_fn=<NllLossBackward>)
tensor(0.9862, grad_fn=<NllLossBackward>)
tensor(0.7022, grad_fn=<NllLossBackward>)
tensor(1.0731, grad_fn=<NllLossBackward>)
tensor(1.0787, grad_fn=<NllLossBackward>)
tensor(0.7596, grad_fn=<NllLossBackward>)
tensor(0.8739, grad_fn=<NllLossBackward>)
tensor(0.7700, grad_fn=<NllLossBackward>)
tensor(0.9227, grad_fn=<NllLossBackward>)
tensor(0.7920, grad_fn=<NllLossBackward>)
tensor(0.6786, grad_fn=<NllLossBackward>)
tensor(0.8800, grad_fn=<NllLossBackward>)
tensor(1.0467, grad_fn=<NllLossBackward>)
tensor(0.8438, grad_fn=<NllLossBackward>)
tensor(0.9326, grad_fn=<NllLossBackward>)
tensor(0.7184, grad_fn=<NllLossBackward>)
tensor(0.7810, grad_fn=<NllLossBackward>)
tensor(0.8530, grad_fn=<NllLossBackward>)
tensor(0.6119, grad_fn=<NllLossBackward>)
tensor(0.7150, grad_fn=<NllLossBackward>)
tensor(0.6867, grad_fn=<NllLossBackward>)
tensor(1.0057, grad_fn=<NllLossBackward>)
tensor(0.8258, grad_fn=<NllLossBackward>)
tensor(0.6406, grad_fn=<NllLossBackward>)
tensor(0.8040, grad_fn=<NllLossBackward>)
tensor(0.7774, grad_fn=<NllLossBackward>)
tensor(0.7416, grad_fn=<NllLossBackward>)
tensor(0.6745, grad_fn=<NllLossBackward>)
tensor(0.8330, grad_fn=<NllLossBackward>)
tensor(0.7959, grad_fn=<NllLossBackward>)
tensor(0.9162, grad_fn=<NllLossBackward>)
tensor(1.0311, grad_fn=<NllLossBackward>)
tensor(0.9207, grad_fn=<NllLossBackward>)
tensor(0.8259, grad_fn=<NllLossBackward>)
tensor(0.6788, grad_fn=<NllLossBackward>)
tensor(0.7326, grad_fn=<NllLossBackward>)
tensor(0.6328, grad_fn=<NllLossBackward>)
tensor(0.8205, grad_fn=<NllLossBackward>)
tensor(0.6644, grad_fn=<NllLossBackward>)
tensor(0.9653, grad_fn=<NllLossBackward>)
tensor(0.8633, grad_fn=<NllLossBackward>)
tensor(0.7189, grad_fn=<NllLossBackward>)
tensor(0.9078, grad_fn=<NllLossBackward>)
tensor(0.8050, grad_fn=<NllLossBackward>)
tensor(0.6895, grad_fn=<NllLossBackward>)
tensor(0.5428, grad_fn=<NllLossBackward>)
tensor(0.8596, grad_fn=<NllLossBackward>)
tensor(0.6370, grad_fn=<NllLossBackward>)
tensor(0.9341, grad_fn=<NllLossBackward>)
tensor(0.6030, grad_fn=<NllLossBackward>)
tensor(0.7674, grad_fn=<NllLossBackward>)
tensor(0.7420, grad_fn=<NllLossBackward>)
tensor(0.8008, grad_fn=<NllLossBackward>)
tensor(0.8943, grad_fn=<NllLossBackward>)
tensor(0.7491, grad_fn=<NllLossBackward>)
tensor(0.9084, grad_fn=<NllLossBackward>)
tensor(0.7383, grad_fn=<NllLossBackward>)
tensor(0.6607, grad_fn=<NllLossBackward>)
tensor(0.6361, grad_fn=<NllLossBackward>)
tensor(0.7478, grad_fn=<NllLossBackward>)
tensor(0.5864, grad_fn=<NllLossBackward>)
tensor(0.8633, grad_fn=<NllLossBackward>)
tensor(0.7285, grad_fn=<NllLossBackward>)
tensor(0.7492, grad_fn=<NllLossBackward>)
tensor(0.8460, grad_fn=<NllLossBackward>)
tensor(0.8438, grad_fn=<NllLossBackward>)
tensor(0.9909, grad_fn=<NllLossBackward>)
tensor(0.6670, grad_fn=<NllLossBackward>)
tensor(0.6768, grad_fn=<NllLossBackward>)
tensor(0.8661, grad_fn=<NllLossBackward>)
tensor(0.6738, grad_fn=<NllLossBackward>)
tensor(0.5956, grad_fn=<NllLossBackward>)
tensor(0.8264, grad_fn=<NllLossBackward>)
tensor(0.9983, grad_fn=<NllLossBackward>)
tensor(0.8128, grad_fn=<NllLossBackward>)
tensor(0.8495, grad_fn=<NllLossBackward>)
tensor(0.8289, grad_fn=<NllLossBackward>)
tensor(0.7093, grad_fn=<NllLossBackward>)
tensor(0.9013, grad_fn=<NllLossBackward>)
tensor(1.0313, grad_fn=<NllLossBackward>)
tensor(0.7483, grad_fn=<NllLossBackward>)
tensor(0.8756, grad_fn=<NllLossBackward>)
tensor(0.8815, grad_fn=<NllLossBackward>)
tensor(0.7172, grad_fn=<NllLossBackward>)
tensor(0.9101, grad_fn=<NllLossBackward>)
tensor(0.8325, grad_fn=<NllLossBackward>)
tensor(0.8743, grad_fn=<NllLossBackward>)
tensor(0.6468, grad_fn=<NllLossBackward>)
tensor(0.7994, grad_fn=<NllLossBackward>)
tensor(0.7621, grad_fn=<NllLossBackward>)
tensor(0.6671, grad_fn=<NllLossBackward>)
tensor(0.7220, grad_fn=<NllLossBackward>)
tensor(1.0542, grad_fn=<NllLossBackward>)
tensor(0.9232, grad_fn=<NllLossBackward>)
tensor(0.6276, grad_fn=<NllLossBackward>)
tensor(0.9557, grad_fn=<NllLossBackward>)
tensor(0.7554, grad_fn=<NllLossBackward>)
tensor(1.0219, grad_fn=<NllLossBackward>)
tensor(0.8817, grad_fn=<NllLossBackward>)
tensor(0.5933, grad_fn=<NllLossBackward>)
tensor(0.7026, grad_fn=<NllLossBackward>)
tensor(0.9322, grad_fn=<NllLossBackward>)
tensor(0.9985, grad_fn=<NllLossBackward>)
tensor(0.9405, grad_fn=<NllLossBackward>)
tensor(0.5869, grad_fn=<NllLossBackward>)
tensor(0.9015, grad_fn=<NllLossBackward>)
tensor(1.0257, grad_fn=<NllLossBackward>)
tensor(0.6356, grad_fn=<NllLossBackward>)
tensor(0.9087, grad_fn=<NllLossBackward>)
tensor(0.6832, grad_fn=<NllLossBackward>)
tensor(1.0058, grad_fn=<NllLossBackward>)
tensor(0.9265, grad_fn=<NllLossBackward>)
tensor(0.6188, grad_fn=<NllLossBackward>)
tensor(1.0565, grad_fn=<NllLossBackward>)
tensor(0.5667, grad_fn=<NllLossBackward>)
tensor(0.8639, grad_fn=<NllLossBackward>)
tensor(0.9530, grad_fn=<NllLossBackward>)
tensor(0.6344, grad_fn=<NllLossBackward>)
tensor(0.8509, grad_fn=<NllLossBackward>)
tensor(0.7135, grad_fn=<NllLossBackward>)
tensor(0.9283, grad_fn=<NllLossBackward>)
tensor(0.9008, grad_fn=<NllLossBackward>)
tensor(0.7921, grad_fn=<NllLossBackward>)
tensor(0.8313, grad_fn=<NllLossBackward>)
tensor(0.9084, grad_fn=<NllLossBackward>)
tensor(0.7636, grad_fn=<NllLossBackward>)
tensor(0.7947, grad_fn=<NllLossBackward>)
tensor(0.9467, grad_fn=<NllLossBackward>)
tensor(0.8614, grad_fn=<NllLossBackward>)
tensor(0.8623, grad_fn=<NllLossBackward>)
tensor(0.7071, grad_fn=<NllLossBackward>)
tensor(0.7903, grad_fn=<NllLossBackward>)
tensor(0.6805, grad_fn=<NllLossBackward>)
tensor(0.7113, grad_fn=<NllLossBackward>)
tensor(0.6706, grad_fn=<NllLossBackward>)
tensor(0.8209, grad_fn=<NllLossBackward>)
tensor(0.7984, grad_fn=<NllLossBackward>)
tensor(0.9881, grad_fn=<NllLossBackward>)
tensor(0.7782, grad_fn=<NllLossBackward>)
tensor(0.7929, grad_fn=<NllLossBackward>)
tensor(0.7470, grad_fn=<NllLossBackward>)
tensor(1.0123, grad_fn=<NllLossBackward>)
tensor(0.8230, grad_fn=<NllLossBackward>)
tensor(0.8525, grad_fn=<NllLossBackward>)
tensor(0.7064, grad_fn=<NllLossBackward>)
tensor(0.6805, grad_fn=<NllLossBackward>)
tensor(0.8653, grad_fn=<NllLossBackward>)
tensor(0.7748, grad_fn=<NllLossBackward>)
tensor(0.7698, grad_fn=<NllLossBackward>)
tensor(0.8156, grad_fn=<NllLossBackward>)
tensor(0.7672, grad_fn=<NllLossBackward>)
tensor(0.7232, grad_fn=<NllLossBackward>)
tensor(0.8167, grad_fn=<NllLossBackward>)
tensor(0.7949, grad_fn=<NllLossBackward>)
tensor(0.7510, grad_fn=<NllLossBackward>)
tensor(0.7011, grad_fn=<NllLossBackward>)
tensor(0.8116, grad_fn=<NllLossBackward>)
tensor(0.7101, grad_fn=<NllLossBackward>)
tensor(0.8288, grad_fn=<NllLossBackward>)
tensor(0.7508, grad_fn=<NllLossBackward>)
tensor(0.7879, grad_fn=<NllLossBackward>)
tensor(0.7750, grad_fn=<NllLossBackward>)
tensor(0.8426, grad_fn=<NllLossBackward>)
tensor(0.5851, grad_fn=<NllLossBackward>)
tensor(0.7773, grad_fn=<NllLossBackward>)
tensor(0.8997, grad_fn=<NllLossBackward>)
tensor(0.9345, grad_fn=<NllLossBackward>)
tensor(0.6633, grad_fn=<NllLossBackward>)
tensor(0.6784, grad_fn=<NllLossBackward>)
tensor(0.6693, grad_fn=<NllLossBackward>)
tensor(0.7871, grad_fn=<NllLossBackward>)
tensor(0.9598, grad_fn=<NllLossBackward>)
tensor(0.6461, grad_fn=<NllLossBackward>)
tensor(0.7522, grad_fn=<NllLossBackward>)
tensor(1.1746, grad_fn=<NllLossBackward>)
tensor(0.7946, grad_fn=<NllLossBackward>)
tensor(0.6576, grad_fn=<NllLossBackward>)
tensor(0.8996, grad_fn=<NllLossBackward>)
tensor(0.7726, grad_fn=<NllLossBackward>)
tensor(0.7385, grad_fn=<NllLossBackward>)
tensor(0.7350, grad_fn=<NllLossBackward>)
tensor(0.6889, grad_fn=<NllLossBackward>)
tensor(0.8585, grad_fn=<NllLossBackward>)
tensor(0.7252, grad_fn=<NllLossBackward>)
tensor(0.7042, grad_fn=<NllLossBackward>)
tensor(0.8799, grad_fn=<NllLossBackward>)
tensor(0.5369, grad_fn=<NllLossBackward>)
tensor(0.8062, grad_fn=<NllLossBackward>)
tensor(0.8021, grad_fn=<NllLossBackward>)
tensor(0.7313, grad_fn=<NllLossBackward>)
tensor(0.6012, grad_fn=<NllLossBackward>)
tensor(0.9116, grad_fn=<NllLossBackward>)
tensor(0.6744, grad_fn=<NllLossBackward>)
tensor(0.9559, grad_fn=<NllLossBackward>)
tensor(0.7590, grad_fn=<NllLossBackward>)
tensor(0.9532, grad_fn=<NllLossBackward>)
tensor(0.7671, grad_fn=<NllLossBackward>)
tensor(0.7762, grad_fn=<NllLossBackward>)
tensor(0.7566, grad_fn=<NllLossBackward>)
tensor(0.7593, grad_fn=<NllLossBackward>)
tensor(0.7406, grad_fn=<NllLossBackward>)
tensor(0.7787, grad_fn=<NllLossBackward>)
tensor(0.8136, grad_fn=<NllLossBackward>)
tensor(0.5529, grad_fn=<NllLossBackward>)
tensor(0.8065, grad_fn=<NllLossBackward>)
tensor(0.8685, grad_fn=<NllLossBackward>)
tensor(0.8211, grad_fn=<NllLossBackward>)
tensor(0.9561, grad_fn=<NllLossBackward>)
tensor(0.8386, grad_fn=<NllLossBackward>)
tensor(0.6064, grad_fn=<NllLossBackward>)
tensor(0.7413, grad_fn=<NllLossBackward>)
tensor(0.6632, grad_fn=<NllLossBackward>)
tensor(0.7452, grad_fn=<NllLossBackward>)
tensor(0.6998, grad_fn=<NllLossBackward>)
tensor(0.8205, grad_fn=<NllLossBackward>)
tensor(0.7763, grad_fn=<NllLossBackward>)
tensor(0.8441, grad_fn=<NllLossBackward>)
tensor(0.7934, grad_fn=<NllLossBackward>)
tensor(0.8645, grad_fn=<NllLossBackward>)
tensor(0.7565, grad_fn=<NllLossBackward>)
tensor(0.7646, grad_fn=<NllLossBackward>)
tensor(0.5332, grad_fn=<NllLossBackward>)
tensor(0.7578, grad_fn=<NllLossBackward>)
tensor(0.6201, grad_fn=<NllLossBackward>)
tensor(0.6256, grad_fn=<NllLossBackward>)
tensor(0.6158, grad_fn=<NllLossBackward>)
tensor(0.8520, grad_fn=<NllLossBackward>)
tensor(0.9006, grad_fn=<NllLossBackward>)
tensor(0.5757, grad_fn=<NllLossBackward>)
tensor(0.7078, grad_fn=<NllLossBackward>)
tensor(0.5404, grad_fn=<NllLossBackward>)
tensor(0.7383, grad_fn=<NllLossBackward>)
tensor(0.9015, grad_fn=<NllLossBackward>)
tensor(0.7151, grad_fn=<NllLossBackward>)
tensor(0.9265, grad_fn=<NllLossBackward>)
tensor(0.9293, grad_fn=<NllLossBackward>)
tensor(0.7653, grad_fn=<NllLossBackward>)
tensor(0.8442, grad_fn=<NllLossBackward>)
tensor(0.8353, grad_fn=<NllLossBackward>)
tensor(0.5711, grad_fn=<NllLossBackward>)
tensor(0.7085, grad_fn=<NllLossBackward>)
tensor(1.0415, grad_fn=<NllLossBackward>)
tensor(0.9253, grad_fn=<NllLossBackward>)
tensor(0.9923, grad_fn=<NllLossBackward>)
tensor(0.8098, grad_fn=<NllLossBackward>)
tensor(0.7251, grad_fn=<NllLossBackward>)
tensor(0.6854, grad_fn=<NllLossBackward>)
tensor(0.8255, grad_fn=<NllLossBackward>)
tensor(0.7893, grad_fn=<NllLossBackward>)
tensor(0.9470, grad_fn=<NllLossBackward>)
tensor(0.7518, grad_fn=<NllLossBackward>)
tensor(0.7586, grad_fn=<NllLossBackward>)
tensor(0.6842, grad_fn=<NllLossBackward>)
tensor(0.9246, grad_fn=<NllLossBackward>)
tensor(0.7161, grad_fn=<NllLossBackward>)
tensor(0.7285, grad_fn=<NllLossBackward>)
tensor(0.7212, grad_fn=<NllLossBackward>)
tensor(0.5808, grad_fn=<NllLossBackward>)
tensor(0.7947, grad_fn=<NllLossBackward>)
tensor(0.9082, grad_fn=<NllLossBackward>)
tensor(0.7287, grad_fn=<NllLossBackward>)
tensor(0.5995, grad_fn=<NllLossBackward>)
tensor(0.9633, grad_fn=<NllLossBackward>)
tensor(0.8313, grad_fn=<NllLossBackward>)
tensor(0.7518, grad_fn=<NllLossBackward>)
tensor(0.6092, grad_fn=<NllLossBackward>)
tensor(0.9824, grad_fn=<NllLossBackward>)
tensor(0.6653, grad_fn=<NllLossBackward>)
tensor(0.8603, grad_fn=<NllLossBackward>)
tensor(0.6490, grad_fn=<NllLossBackward>)
tensor(0.6911, grad_fn=<NllLossBackward>)
tensor(0.7257, grad_fn=<NllLossBackward>)
tensor(0.7694, grad_fn=<NllLossBackward>)

2410
my_runs/5/metrics.json Normal file

File diff suppressed because it is too large Load Diff

88
my_runs/5/run.json Normal file
View File

@ -0,0 +1,88 @@
{
"artifacts": [],
"command": "my_main",
"experiment": {
"base_dir": "e:\\Pyton\\IUM\\ium_452627",
"dependencies": [
"numpy==1.20.0",
"pandas==1.4.1",
"sacred==0.8.4",
"torch==1.8.1+cu102",
"torchvision==0.9.1+cu102"
],
"mainfile": "sacred_train.py",
"name": "s452627",
"repositories": [],
"sources": [
[
"sacred_train.py",
"_sources\\sacred_train_58880e146636573a7d2893b734269763.py"
],
[
"zadanie1.py",
"_sources\\zadanie1_214ad86c108ac00197ed071c54ee3658.py"
]
]
},
"heartbeat": "2023-05-11T19:47:40.525506",
"host": {
"ENV": {},
"cpu": "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz",
"gpus": {
"driver_version": "472.12",
"gpus": [
{
"model": "NVIDIA GeForce GTX 1070",
"persistence_mode": false,
"total_memory": 8192
}
]
},
"hostname": "JAKUB-HENYK",
"os": [
"Windows",
"Windows-10-10.0.19041-SP0"
],
"python_version": "3.8.3"
},
"meta": {
"command": "my_main",
"config_updates": {},
"named_configs": [],
"options": {
"--beat-interval": null,
"--capture": null,
"--comment": null,
"--debug": false,
"--enforce_clean": false,
"--file_storage": null,
"--force": false,
"--help": false,
"--id": null,
"--loglevel": null,
"--mongo_db": null,
"--name": null,
"--pdb": false,
"--print-config": false,
"--priority": null,
"--queue": false,
"--s3": null,
"--sql": null,
"--tiny_db": null,
"--unobserved": false,
"COMMAND": null,
"UPDATE": [],
"help": false,
"with": false
}
},
"resources": [
[
"E:\\Pyton\\IUM\\ium_452627\\Customers.csv",
"my_runs\\_resources\\Customers_6514be2808e61a30190fa6265e2352da.csv"
]
],
"result": null,
"start_time": "2023-05-11T19:47:00.196563",
"status": "RUNNING"
}

5
my_runs/6/config.json Normal file
View File

@ -0,0 +1,5 @@
{
"epochs": 10,
"learning_rate": 0.001,
"seed": 562570933
}

0
my_runs/6/cout.txt Normal file
View File

1
my_runs/6/metrics.json Normal file
View File

@ -0,0 +1 @@
{}

96
my_runs/6/run.json Normal file
View File

@ -0,0 +1,96 @@
{
"artifacts": [],
"command": "my_main",
"experiment": {
"base_dir": "e:\\Pyton\\IUM\\ium_452627",
"dependencies": [
"numpy==1.20.0",
"pandas==1.4.1",
"sacred==0.8.4",
"torch==1.8.1+cu102",
"torchvision==0.9.1+cu102"
],
"mainfile": "sacred_train.py",
"name": "s452627",
"repositories": [],
"sources": [
[
"sacred_train.py",
"_sources\\sacred_train_ff46cdc09c67889917b2588f7c9f993f.py"
],
[
"zadanie1.py",
"_sources\\zadanie1_214ad86c108ac00197ed071c54ee3658.py"
]
]
},
"fail_trace": [
"Traceback (most recent call last):\n",
" File \"C:\\Users\\kubak\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\sacred\\config\\captured_function.py\", line 42, in captured_function\n result = wrapped(*args, **kwargs)\n",
" File \"e:/Pyton/IUM/ium_452627/sacred_train.py\", line 94, in my_main\n trainNet(_run, trainloader, criterion, optimizer, net, int(float(epochs)))\n",
" File \"e:/Pyton/IUM/ium_452627/sacred_train.py\", line 50, in trainNet\n print(loss[0])\n",
"IndexError: invalid index of a 0-dim tensor. Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number\n"
],
"heartbeat": "2023-05-11T19:48:10.063748",
"host": {
"ENV": {},
"cpu": "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz",
"gpus": {
"driver_version": "472.12",
"gpus": [
{
"model": "NVIDIA GeForce GTX 1070",
"persistence_mode": false,
"total_memory": 8192
}
]
},
"hostname": "JAKUB-HENYK",
"os": [
"Windows",
"Windows-10-10.0.19041-SP0"
],
"python_version": "3.8.3"
},
"meta": {
"command": "my_main",
"config_updates": {},
"named_configs": [],
"options": {
"--beat-interval": null,
"--capture": null,
"--comment": null,
"--debug": false,
"--enforce_clean": false,
"--file_storage": null,
"--force": false,
"--help": false,
"--id": null,
"--loglevel": null,
"--mongo_db": null,
"--name": null,
"--pdb": false,
"--print-config": false,
"--priority": null,
"--queue": false,
"--s3": null,
"--sql": null,
"--tiny_db": null,
"--unobserved": false,
"COMMAND": null,
"UPDATE": [],
"help": false,
"with": false
}
},
"resources": [
[
"E:\\Pyton\\IUM\\ium_452627\\Customers.csv",
"my_runs\\_resources\\Customers_6514be2808e61a30190fa6265e2352da.csv"
]
],
"result": null,
"start_time": "2023-05-11T19:48:03.769748",
"status": "FAILED",
"stop_time": "2023-05-11T19:48:10.065747"
}

5
my_runs/7/config.json Normal file
View File

@ -0,0 +1,5 @@
{
"epochs": 10,
"learning_rate": 0.001,
"seed": 486901295
}

400
my_runs/7/cout.txt Normal file
View File

@ -0,0 +1,400 @@
1.6230660676956177
1.6820298433303833
1.7491368055343628
1.5389869213104248
1.6283531188964844
1.5094218254089355
1.7905722856521606
1.5641237497329712
1.5830129384994507
1.5243418216705322
1.519378900527954
1.6054351329803467
1.5723960399627686
1.6757309436798096
1.6402519941329956
1.5999528169631958
1.5646768808364868
1.621456503868103
1.556501030921936
1.5461764335632324
1.5640205144882202
1.5807640552520752
1.509324073791504
1.5999341011047363
1.54926598072052
1.4149173498153687
1.4550354480743408
1.4619419574737549
1.5496633052825928
1.4661115407943726
1.4423216581344604
1.4768248796463013
1.5189144611358643
1.5314130783081055
1.375200867652893
1.6448571681976318
1.4357342720031738
1.4321017265319824
1.377081036567688
1.4897136688232422
1.4930771589279175
1.384333610534668
1.4029908180236816
1.5566425323486328
1.3000067472457886
1.4866435527801514
1.4507079124450684
1.276503562927246
1.3888378143310547
1.524937391281128
1.3618971109390259
1.3161665201187134
1.5433791875839233
1.2492393255233765
1.4940097332000732
1.4125964641571045
1.4479514360427856
1.3118144273757935
1.4395060539245605
1.2881097793579102
1.3483657836914062
1.468015193939209
1.2474229335784912
1.4239823818206787
1.2277672290802002
1.274768590927124
1.2919094562530518
1.1657848358154297
1.4304120540618896
1.3109701871871948
1.3414161205291748
1.1767857074737549
1.4821016788482666
1.321860432624817
1.1414504051208496
1.3034462928771973
1.1764267683029175
1.289609670639038
1.0915460586547852
1.2168611288070679
1.29572331905365
1.2754342555999756
1.2659229040145874
1.2495107650756836
1.3686363697052002
1.39882493019104
1.2654469013214111
1.1343276500701904
1.0715656280517578
1.5835096836090088
1.024012804031372
1.2513726949691772
1.2777726650238037
1.1889636516571045
1.2051066160202026
1.2106695175170898
1.1721521615982056
1.1284140348434448
1.080606460571289
1.0519185066223145
1.0566167831420898
1.2587218284606934
0.9446205496788025
1.2454330921173096
1.1374781131744385
1.150152564048767
1.2487719058990479
1.297490119934082
1.193822979927063
1.0731717348098755
1.1139096021652222
1.1425707340240479
1.0449187755584717
1.1798356771469116
0.975722074508667
1.0425740480422974
1.0942325592041016
1.388864278793335
1.192713737487793
1.1410622596740723
0.9496395587921143
1.0701346397399902
1.1487751007080078
1.432262659072876
1.2153462171554565
1.0495221614837646
1.0923141241073608
1.1329574584960938
1.1577043533325195
1.3608773946762085
1.1734118461608887
0.9182109832763672
0.9625385999679565
1.1836010217666626
0.8289363980293274
0.9563491940498352
1.2100787162780762
0.8395973443984985
1.2490291595458984
0.8419893980026245
1.1588988304138184
1.1695365905761719
1.3074092864990234
1.2472511529922485
1.1702475547790527
1.1223745346069336
0.9538043141365051
1.014662742614746
1.1009855270385742
1.3475929498672485
0.8792193531990051
0.8265282511711121
1.2419137954711914
1.150453805923462
0.960175633430481
1.1626787185668945
1.2132294178009033
1.0720094442367554
1.2487077713012695
0.9944485425949097
0.9793208837509155
1.003244400024414
1.0830307006835938
0.9764111042022705
1.0214650630950928
0.9934533834457397
1.2536094188690186
1.0190931558609009
0.9218314290046692
0.9265110492706299
1.1093831062316895
0.873543381690979
1.3817375898361206
0.9874609708786011
1.1531572341918945
0.9912592768669128
0.8528863191604614
0.9787036180496216
0.8060413002967834
0.9778159856796265
0.815345287322998
1.1282814741134644
1.377526879310608
1.159498691558838
1.1649258136749268
1.0051367282867432
0.8384489417076111
0.8317087292671204
0.9064835906028748
0.9986045360565186
1.030361294746399
1.0492091178894043
0.8241448402404785
0.9408456087112427
1.152981162071228
0.9892047643661499
1.0371530055999756
1.0553503036499023
0.9152474403381348
0.9896761178970337
1.061337947845459
1.0290452241897583
1.1394522190093994
1.0344206094741821
0.8296985626220703
1.0531589984893799
0.9323599338531494
0.8394390344619751
0.8166555166244507
0.8316986560821533
1.002506971359253
1.0503551959991455
0.8773961067199707
1.1079442501068115
0.9764309525489807
0.865100622177124
1.1103380918502808
1.1001451015472412
1.139549970626831
1.2617132663726807
1.1216710805892944
0.9117012023925781
0.8887176513671875
0.9669941067695618
0.7902080416679382
0.8835121393203735
1.0451416969299316
1.0656113624572754
0.6773416996002197
1.0285110473632812
0.9202332496643066
1.0144822597503662
1.0337969064712524
1.017679214477539
0.9738996028900146
1.105922818183899
1.2160775661468506
1.1865177154541016
1.0323147773742676
0.954483151435852
1.261365294456482
0.8203682899475098
0.9776293039321899
0.6714390516281128
0.6768577098846436
0.9755690097808838
1.1889386177062988
1.0228142738342285
0.7801070213317871
0.8333088159561157
0.8598947525024414
0.8841252326965332
0.8776575326919556
1.0607362985610962
1.2591203451156616
1.0642971992492676
1.1072280406951904
1.02755606174469
0.9788860082626343
0.9183667898178101
0.7122618556022644
1.0134289264678955
0.5897078514099121
0.6340580582618713
1.022355079650879
0.8609708547592163
1.1205224990844727
0.9440596103668213
0.8599885106086731
0.9359227418899536
0.8256407380104065
1.0813887119293213
0.9757722616195679
1.1986292600631714
0.7315202355384827
0.6866023540496826
0.815874457359314
0.8804708123207092
1.201775074005127
0.9452980160713196
0.8180302381515503
0.8785721063613892
0.9843486547470093
0.9913842678070068
0.6751294136047363
0.9343889951705933
0.8577640056610107
1.0570249557495117
0.9384706616401672
0.8102688193321228
0.6193019151687622
0.8426458239555359
0.8701091408729553
1.0298147201538086
0.9379199743270874
1.1814982891082764
0.9192432761192322
1.1767094135284424
1.0947296619415283
0.7939531207084656
0.8930078148841858
1.2851269245147705
0.8082880973815918
1.1618633270263672
0.8891748189926147
0.8962579369544983
0.9503337144851685
1.0448545217514038
0.8261522650718689
1.1176156997680664
0.8245120644569397
0.7921063899993896
0.966664731502533
0.6128285527229309
1.1398265361785889
0.8951024413108826
0.8555091023445129
0.9423876404762268
0.8754314184188843
0.8727045059204102
1.0850248336791992
1.0288532972335815
0.7522667646408081
0.6756600141525269
0.6829381585121155
1.0531389713287354
1.062136173248291
0.7308762073516846
0.7852012515068054
0.8483222126960754
0.9327411651611328
0.8266010284423828
0.7300620675086975
0.7384270429611206
0.8691636323928833
0.8261711597442627
0.9110106229782104
0.7787065505981445
0.9346214532852173
1.0036942958831787
1.1346150636672974
1.1032440662384033
0.9842561483383179
0.6584384441375732
0.7690529823303223
0.9518932700157166
0.969390869140625
0.6292759776115417
0.861823558807373
1.0020227432250977
0.8043882250785828
1.0703190565109253
1.2174514532089233
0.9096038341522217
0.9197603464126587
0.8633270263671875
1.0342717170715332
0.8141842484474182
0.8617056012153625
0.8302423357963562
0.8497481346130371
0.871558666229248
1.1848621368408203
1.0444331169128418
1.0134308338165283
0.6316218376159668
0.8203948140144348
0.9308501482009888
0.9720712304115295
0.9066691398620605
0.849102795124054
0.9337921142578125
0.8078703284263611
0.7128146886825562
1.0422967672348022
0.9367537498474121
0.8211072087287903
0.7752896547317505
0.9761680960655212
0.6779760718345642
0.9350895881652832
0.8722249269485474
0.8311982154846191
0.905919075012207
0.8581783771514893
0.869439959526062
0.7472037672996521
0.7891084551811218
0.9732564687728882
1.0308506488800049
0.7775583863258362
0.7620372176170349
1.0218868255615234
0.9783582091331482
1.0991756916046143
1.1250553131103516
0.9594120383262634
0.8759898543357849
0.9254418611526489
0.8226104974746704

1210
my_runs/7/metrics.json Normal file

File diff suppressed because it is too large Load Diff

88
my_runs/7/run.json Normal file
View File

@ -0,0 +1,88 @@
{
"artifacts": [],
"command": "my_main",
"experiment": {
"base_dir": "e:\\Pyton\\IUM\\ium_452627",
"dependencies": [
"numpy==1.20.0",
"pandas==1.4.1",
"sacred==0.8.4",
"torch==1.8.1+cu102",
"torchvision==0.9.1+cu102"
],
"mainfile": "sacred_train.py",
"name": "s452627",
"repositories": [],
"sources": [
[
"sacred_train.py",
"_sources\\sacred_train_544504897ad356dd64cd4527a9914747.py"
],
[
"zadanie1.py",
"_sources\\zadanie1_214ad86c108ac00197ed071c54ee3658.py"
]
]
},
"heartbeat": "2023-05-11T19:49:13.645610",
"host": {
"ENV": {},
"cpu": "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz",
"gpus": {
"driver_version": "472.12",
"gpus": [
{
"model": "NVIDIA GeForce GTX 1070",
"persistence_mode": false,
"total_memory": 8192
}
]
},
"hostname": "JAKUB-HENYK",
"os": [
"Windows",
"Windows-10-10.0.19041-SP0"
],
"python_version": "3.8.3"
},
"meta": {
"command": "my_main",
"config_updates": {},
"named_configs": [],
"options": {
"--beat-interval": null,
"--capture": null,
"--comment": null,
"--debug": false,
"--enforce_clean": false,
"--file_storage": null,
"--force": false,
"--help": false,
"--id": null,
"--loglevel": null,
"--mongo_db": null,
"--name": null,
"--pdb": false,
"--print-config": false,
"--priority": null,
"--queue": false,
"--s3": null,
"--sql": null,
"--tiny_db": null,
"--unobserved": false,
"COMMAND": null,
"UPDATE": [],
"help": false,
"with": false
}
},
"resources": [
[
"E:\\Pyton\\IUM\\ium_452627\\Customers.csv",
"my_runs\\_resources\\Customers_6514be2808e61a30190fa6265e2352da.csv"
]
],
"result": null,
"start_time": "2023-05-11T19:48:43.353238",
"status": "RUNNING"
}

View File

@ -0,0 +1,104 @@
#!/usr/bin/python
import pandas as pd
import numpy as np
import zadanie1 as z
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
class Net(nn.Module):
    """Single-layer classifier mapping 6 input features to 6 output logits."""

    def __init__(self):
        super().__init__()
        # One fully connected layer; the earlier conv/pool experiments were
        # dropped since the inputs here are flat 6-feature vectors.
        self.fc3 = nn.Linear(6, 6)

    def forward(self, x):
        # Straight pass through the single linear layer.
        return self.fc3(x)
def trainNet(_run, trainloader, criterion, optimizer, net, epochs=20):
    """Train ``net`` on ``trainloader`` for ``epochs`` passes.

    Prints every batch loss and logs it to the Sacred run as "training.loss".

    Args:
        _run: Sacred run object (provides ``log_scalar``).
        trainloader: iterable of ``(inputs, labels)`` batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer over ``net.parameters()``.
        net: model to train.
        epochs: number of passes over the training data.
    """
    for epoch in range(epochs):
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            # CrossEntropyLoss needs integer class indices, not floats.
            labelsX = torch.Tensor([x for x in labels])
            labels = labelsX.type(torch.LongTensor)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            # .item() extracts the Python float from the 0-dim loss tensor;
            # logging the tensor itself would keep the whole autograd graph
            # alive for every logged step.
            batch_loss = loss.item()
            print(batch_loss)
            _run.log_scalar("training.loss", batch_loss)
            loss.backward()
            optimizer.step()
    print('Finished Training')
# Sacred experiment; save_git_info=False avoids requiring git metadata and
# interactive=True relaxes Sacred's script-only entry-point check.
ex = Experiment("s452627", interactive=True, save_git_info=False)
# Persist each run (config, metrics, stdout) under ./my_runs.
ex.observers.append(FileStorageObserver('my_runs'))
# MongoDB observer left disabled; file storage is used instead.
#ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
# Default hyperparameters; Sacred captures the locals below as the run
# configuration (see my_runs/*/config.json).
@ex.config
def my_config():
    epochs = 10
    learning_rate = 0.001
@ex.automain
def my_main(epochs, learning_rate, _run):
    """Train the classifier and store the weights as a Sacred run artifact.

    Args:
        epochs: number of training epochs (from the Sacred config).
        learning_rate: SGD learning rate (from the Sacred config).
        _run: Sacred run object used for per-batch loss logging.
    """
    # Track the raw dataset file as a Sacred resource.
    ex.open_resource("Customers.csv", "r")
    train, dev, test = z.prepareData()
    batch_size = 4
    trainlist = train.values.tolist()
    testlist = test.values.tolist()
    # First column is the label; remaining columns are features (6, matching
    # Net's input size — confirm against z.prepareData's output schema).
    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=2)
    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
    # NOTE(review): testloader and classes are built but never used here —
    # evaluation presumably happens elsewhere.
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=2)
    classes = ('male', 'female')
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    # epochs may arrive as a string from the CLI; float() tolerates "10.0".
    trainNet(_run, trainloader, criterion, optimizer, net, int(float(epochs)))
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)
    # Attach the saved weights to the run's artifacts.
    ex.add_artifact("cifar_net.pth")
#if __name__ == '__main__':
    #ex.run()

View File

@ -0,0 +1,104 @@
#!/usr/bin/python
import pandas as pd
import numpy as np
import zadanie1 as z
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
class Net(nn.Module):
    """Single-layer classifier mapping 6 input features to 6 output logits."""

    def __init__(self):
        super().__init__()
        # One fully connected layer; the earlier conv/pool experiments were
        # dropped since the inputs here are flat 6-feature vectors.
        self.fc3 = nn.Linear(6, 6)

    def forward(self, x):
        # Straight pass through the single linear layer.
        return self.fc3(x)
def trainNet(_run, trainloader, criterion, optimizer, net, epochs=20):
    """Train ``net`` on ``trainloader`` for ``epochs`` passes.

    Prints every batch loss and logs it to the Sacred run as "training.loss".

    Args:
        _run: Sacred run object (provides ``log_scalar``).
        trainloader: iterable of ``(inputs, labels)`` batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer over ``net.parameters()``.
        net: model to train.
        epochs: number of passes over the training data.
    """
    for epoch in range(epochs):
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            # CrossEntropyLoss needs integer class indices, not floats.
            labelsX = torch.Tensor([x for x in labels])
            labels = labelsX.type(torch.LongTensor)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            # .item() yields a plain float: printing the raw tensor emits the
            # noisy "tensor(..., grad_fn=...)" repr, and logging the tensor
            # would keep the whole autograd graph alive for every step.
            batch_loss = loss.item()
            print(batch_loss)
            _run.log_scalar("training.loss", batch_loss)
            loss.backward()
            optimizer.step()
    print('Finished Training')
# Sacred experiment; save_git_info=False avoids requiring git metadata and
# interactive=True relaxes Sacred's script-only entry-point check.
ex = Experiment("s452627", interactive=True, save_git_info=False)
# Persist each run (config, metrics, stdout) under ./my_runs.
ex.observers.append(FileStorageObserver('my_runs'))
# MongoDB observer left disabled; file storage is used instead.
#ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
# Default hyperparameters; Sacred captures the locals below as the run
# configuration (see my_runs/*/config.json).
@ex.config
def my_config():
    epochs = 10
    learning_rate = 0.001
@ex.automain
def my_main(epochs, learning_rate, _run):
    """Train the classifier and store the weights as a Sacred run artifact.

    Args:
        epochs: number of training epochs (from the Sacred config).
        learning_rate: SGD learning rate (from the Sacred config).
        _run: Sacred run object used for per-batch loss logging.
    """
    # Track the raw dataset file as a Sacred resource.
    ex.open_resource("Customers.csv", "r")
    train, dev, test = z.prepareData()
    batch_size = 4
    trainlist = train.values.tolist()
    testlist = test.values.tolist()
    # First column is the label; remaining columns are features (6, matching
    # Net's input size — confirm against z.prepareData's output schema).
    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=2)
    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
    # NOTE(review): testloader and classes are built but never used here —
    # evaluation presumably happens elsewhere.
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=2)
    classes = ('male', 'female')
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    # epochs may arrive as a string from the CLI; float() tolerates "10.0".
    trainNet(_run, trainloader, criterion, optimizer, net, int(float(epochs)))
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)
    # Attach the saved weights to the run's artifacts.
    ex.add_artifact("cifar_net.pth")
#if __name__ == '__main__':
    #ex.run()

View File

@ -0,0 +1,104 @@
#!/usr/bin/python
import pandas as pd
import numpy as np
import zadanie1 as z
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
class Net(nn.Module):
    """Single-layer classifier mapping 6 input features to 6 output logits."""

    def __init__(self):
        super().__init__()
        # One fully connected layer; the earlier conv/pool experiments were
        # dropped since the inputs here are flat 6-feature vectors.
        self.fc3 = nn.Linear(6, 6)

    def forward(self, x):
        # Straight pass through the single linear layer.
        return self.fc3(x)
def trainNet(_run, trainloader, criterion, optimizer, net, epochs=20):
    """Train ``net`` on ``trainloader`` for ``epochs`` passes.

    Prints every batch loss and logs it to the Sacred run as "training.loss".

    Args:
        _run: Sacred run object (provides ``log_scalar``).
        trainloader: iterable of ``(inputs, labels)`` batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer over ``net.parameters()``.
        net: model to train.
        epochs: number of passes over the training data.
    """
    for epoch in range(epochs):
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            # CrossEntropyLoss needs integer class indices, not floats.
            labelsX = torch.Tensor([x for x in labels])
            labels = labelsX.type(torch.LongTensor)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            # loss is a 0-dim tensor: indexing it (loss[0]) raises
            # "IndexError: invalid index of a 0-dim tensor"; use .item() to
            # extract the Python float, and log the float so the autograd
            # graph is not retained for every logged step.
            batch_loss = loss.item()
            print(batch_loss)
            _run.log_scalar("training.loss", batch_loss)
            loss.backward()
            optimizer.step()
    print('Finished Training')
# Sacred experiment; save_git_info=False avoids requiring git metadata and
# interactive=True relaxes Sacred's script-only entry-point check.
ex = Experiment("s452627", interactive=True, save_git_info=False)
# Persist each run (config, metrics, stdout) under ./my_runs.
ex.observers.append(FileStorageObserver('my_runs'))
# MongoDB observer left disabled; file storage is used instead.
#ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
# Default hyperparameters; Sacred captures the locals below as the run
# configuration (see my_runs/*/config.json).
@ex.config
def my_config():
    epochs = 10
    learning_rate = 0.001
@ex.automain
def my_main(epochs, learning_rate, _run):
    """Train the classifier and store the weights as a Sacred run artifact.

    Args:
        epochs: number of training epochs (from the Sacred config).
        learning_rate: SGD learning rate (from the Sacred config).
        _run: Sacred run object used for per-batch loss logging.
    """
    # Track the raw dataset file as a Sacred resource.
    ex.open_resource("Customers.csv", "r")
    train, dev, test = z.prepareData()
    batch_size = 4
    trainlist = train.values.tolist()
    testlist = test.values.tolist()
    # First column is the label; remaining columns are features (6, matching
    # Net's input size — confirm against z.prepareData's output schema).
    trainset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in trainlist]
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=2)
    testset = [[torch.Tensor(x[1:]), torch.Tensor([x[0]])] for x in testlist]
    # NOTE(review): testloader and classes are built but never used here —
    # evaluation presumably happens elsewhere.
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=2)
    classes = ('male', 'female')
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
    # epochs may arrive as a string from the CLI; float() tolerates "10.0".
    trainNet(_run, trainloader, criterion, optimizer, net, int(float(epochs)))
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)
    # Attach the saved weights to the run's artifacts.
    ex.add_artifact("cifar_net.pth")
#if __name__ == '__main__':
    #ex.run()

View File

@ -47,6 +47,7 @@ def trainNet(_run, trainloader, criterion, optimizer, net, epochs=20):
outputs = net(inputs)
loss = criterion(outputs, labels)
print(loss.item())
_run.log_scalar("training.loss", loss)
loss.backward()