Add missing file when generating a challenge

Filip Graliński 2019-02-01 12:04:52 +01:00
parent abcce9bf68
commit f7bd1b2ccd
5 changed files with 16 additions and 3 deletions

geval.cabal

@@ -1,5 +1,5 @@
 name: geval
-version: 1.14.0.0
+version: 1.15.0.0
 synopsis: Machine learning evaluation tools
 description: Please see README.md
 homepage: http://github.com/name/project

src/GEval/Core.hs

@@ -106,7 +106,7 @@ defaultLogLossHashedSize = 10
 data Metric = RMSE | MSE | Pearson | Spearman | BLEU | GLEU | WER | Accuracy | ClippEU
               | FMeasure Double | MacroFMeasure Double | NMI
               | LogLossHashed Word32 | CharMatch | MAP | LogLoss | Likelihood
-              | BIOF1 | BIOF1Labels | TokenAccuracy | LikelihoodHashed Word32 | MAE | MultiLabelFMeasure Double
+              | BIOF1 | BIOF1Labels | TokenAccuracy | LikelihoodHashed Word32 | MAE | SMAPE | MultiLabelFMeasure Double
               | MultiLabelLogLoss | MultiLabelLikelihood
               | SoftFMeasure Double
               deriving (Eq)
@@ -145,6 +145,7 @@ instance Show Metric where
   show BIOF1Labels = "BIO-F1-Labels"
   show TokenAccuracy = "TokenAccuracy"
   show MAE = "MAE"
+  show SMAPE = "SMAPE"
   show (MultiLabelFMeasure beta) = "MultiLabel-F" ++ (show beta)
   show MultiLabelLogLoss = "MultiLabel-Logloss"
   show MultiLabelLikelihood = "MultiLabel-Likelihood"
@@ -186,6 +187,7 @@ instance Read Metric where
   readsPrec _ ('B':'I':'O':'-':'F':'1':theRest) = [(BIOF1, theRest)]
   readsPrec _ ('T':'o':'k':'e':'n':'A':'c':'c':'u':'r':'a':'c':'y':theRest) = [(TokenAccuracy, theRest)]
   readsPrec _ ('M':'A':'E':theRest) = [(MAE, theRest)]
+  readsPrec _ ('S':'M':'A':'P':'E':theRest) = [(SMAPE, theRest)]
   readsPrec _ ('M':'u':'l':'t':'i':'L':'a':'b':'e':'l':'-':'L':'o':'g':'L':'o':'s':'s':theRest) = [(MultiLabelLogLoss, theRest)]
   readsPrec _ ('M':'u':'l':'t':'i':'L':'a':'b':'e':'l':'-':'L':'i':'k':'e':'l':'i':'h':'o':'o':'d':theRest) = [(MultiLabelLikelihood, theRest)]
@@ -218,6 +220,7 @@ getMetricOrdering BIOF1 = TheHigherTheBetter
 getMetricOrdering BIOF1Labels = TheHigherTheBetter
 getMetricOrdering TokenAccuracy = TheHigherTheBetter
 getMetricOrdering MAE = TheLowerTheBetter
+getMetricOrdering SMAPE = TheLowerTheBetter
 getMetricOrdering (MultiLabelFMeasure _) = TheHigherTheBetter
 getMetricOrdering MultiLabelLogLoss = TheLowerTheBetter
 getMetricOrdering MultiLabelLikelihood = TheHigherTheBetter
@@ -541,6 +544,10 @@ gevalCore' MSE _ = gevalCoreWithoutInput outParser outParser itemSquaredError av
 gevalCore' MAE _ = gevalCoreWithoutInput outParser outParser itemAbsoluteError averageC id
   where outParser = getValue . TR.double
+gevalCore' SMAPE _ = gevalCoreWithoutInput outParser outParser smape averageC (* 100.0)
+  where outParser = getValue . TR.double
+        smape (exp, out) = (abs (exp-out)) / ((abs exp) + (abs out))
 gevalCore' Pearson _ = gevalCoreByCorrelationMeasure pearson
 gevalCore' Spearman _ = gevalCoreByCorrelationMeasure spearman

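The per-item measure implemented above is |expected − out| / (|expected| + |out|); items are averaged with averageC and the aggregate is scaled by (* 100.0) to a percentage. Note that this variant omits the factor of 2 that appears in some textbook SMAPE definitions. A minimal standalone sketch of the arithmetic (the names smapeItem and smapeOverItems are illustrative, not part of GEval's API):

-- Standalone sketch of the SMAPE arithmetic added above (illustrative
-- names, not GEval's API): per item |e - o| / (|e| + |o|), then the
-- mean over items, scaled to a percentage.
smapeItem :: Double -> Double -> Double
smapeItem expected out = abs (expected - out) / (abs expected + abs out)

smapeOverItems :: [Double] -> [Double] -> Double
smapeOverItems expecteds outs =
  100.0 * sum (zipWith smapeItem expecteds outs)
        / fromIntegral (length expecteds)

main :: IO ()
main = print (smapeOverItems [1.0, 2.0] [1.0, 4.0])
-- items: 0/2 = 0.0 and 2/6 = 0.3333...; mean = 0.1666...; prints 16.66...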
src/GEval/CreateChallenge.hs

@@ -603,6 +603,9 @@ testInContents MultiLabelLikelihood = testInContents MultiLabelLogLoss
 testInContents MultiLabelLogLoss = [hereLit|I am very sad
 I hate
 |]
+testInContents _ = [hereLit|0.72 0 0.007
+9.54 62 0.054
+|]
 
 testExpectedContents :: Metric -> String
 testExpectedContents GLEU = testExpectedContents BLEU

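This catch-all is presumably the fix named in the commit title: metrics without a dedicated sample text (such as the new SMAPE) previously matched no testInContents clause, so the sample input file could not be written when a challenge was generated. A simplified sketch of the pattern (constructor list abbreviated; the real code uses the hereLit quasi-quoter):

-- Simplified sketch of the fallback above: every metric now yields some
-- sample input, so challenge generation always has content to write.
-- (Constructors abbreviated; GEval's real code uses [hereLit|...|].)
data Metric = MultiLabelLogLoss | SMAPE | MAE

testInContents :: Metric -> String
testInContents MultiLabelLogLoss = "I am very sad\nI hate\n"
testInContents _ = "0.72 0 0.007\n9.54 62 0.054\n"  -- generic numeric sample

main :: IO ()
main = putStr (testInContents SMAPE)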
src/GEval/OptionsParser.hs

@@ -210,7 +210,7 @@ metricReader = many $ option auto -- actually `some` should be used inst
                ( long "metric" -- --metric might be in the config.txt file...
                  <> short 'm'
                  <> metavar "METRIC"
-                 <> help "Metric to be used - RMSE, MSE, Pearson, Spearman, Accuracy, LogLoss, Likelihood, F-measure (specify as F1, F2, F0.25, etc.), macro F-measure (specify as Macro-F1, Macro-F2, Macro-F0.25, etc.), multi-label F-measure (specify as MultiLabel-F1, MultiLabel-F2, MultiLabel-F0.25, etc.), MultiLabel-Likelihood, MAP, BLEU, GLEU (\"Google GLEU\" not the grammar correction metric), WER, NMI, ClippEU, LogLossHashed, LikelihoodHashed, BIO-F1, BIO-F1-Labels, TokenAccuracy, soft F-measure (specify as Soft-F1, Soft-F2, Soft-F0.25) or CharMatch" )
+                 <> help "Metric to be used - RMSE, MSE, MAE, SMAPE, Pearson, Spearman, Accuracy, LogLoss, Likelihood, F-measure (specify as F1, F2, F0.25, etc.), macro F-measure (specify as Macro-F1, Macro-F2, Macro-F0.25, etc.), multi-label F-measure (specify as MultiLabel-F1, MultiLabel-F2, MultiLabel-F0.25, etc.), MultiLabel-Likelihood, MAP, BLEU, GLEU (\"Google GLEU\" not the grammar correction metric), WER, NMI, ClippEU, LogLossHashed, LikelihoodHashed, BIO-F1, BIO-F1-Labels, TokenAccuracy, soft F-measure (specify as Soft-F1, Soft-F2, Soft-F0.25) or CharMatch" )
 
 altMetricReader :: Parser (Maybe Metric)
 altMetricReader = optional $ option auto

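Because metricReader uses option auto, no parser change beyond the help text is needed here: optparse-applicative's auto delegates to the Read instance, so --metric SMAPE is handled by the readsPrec clause added earlier. A quick sketch (the import is an assumption about GEval.Core's export list):

-- Sketch: `option auto` delegates to Read, so the readsPrec clause added
-- above is what makes `--metric SMAPE` parse. Assumes GEval.Core exports
-- Metric(..) with its Show/Read instances.
import GEval.Core (Metric (..))

main :: IO ()
main = do
  print (read "SMAPE" :: Metric)                         -- SMAPE
  print (readsPrec 0 "SMAPE rest" :: [(Metric, String)]) -- [(SMAPE," rest")]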
test/Spec.hs

@@ -84,6 +84,9 @@ main = hspec $ do
   describe "mean absolute error" $ do
     it "simple test with arguments" $
       runGEvalTest "mae-simple" `shouldReturnAlmost` 1.5
+  describe "SMAPE" $ do
+    it "simple test" $
+      runGEvalTest "smape-simple" `shouldReturnAlmost` 54.2222222222222222
   describe "Spearman's rank correlation coefficient" $ do
     it "simple test" $ do
       runGEvalTest "spearman-simple" `shouldReturnAlmost` (- 0.5735)