diff --git a/geval.cabal b/geval.cabal
index ec75042..e48552b 100644
--- a/geval.cabal
+++ b/geval.cabal
@@ -1,5 +1,5 @@
 name:                geval
-version:             1.21.0.0
+version:             1.21.1.0
 synopsis:            Machine learning evaluation tools
 description:         Please see README.md
 homepage:            http://github.com/name/project
diff --git a/src/GEval/MetricsMeta.hs b/src/GEval/MetricsMeta.hs
index f65cde8..2158ba9 100644
--- a/src/GEval/MetricsMeta.hs
+++ b/src/GEval/MetricsMeta.hs
@@ -9,7 +9,8 @@ module GEval.MetricsMeta
    getEvaluationSchemeDescription,
    outContents,
    expectedScore,
-   allMetricsDescription)
+   allMetricsDescription,
+   helpMetricParameterMetricsList)
   where

 import GEval.Common
@@ -146,6 +147,12 @@ expectedScore (EvaluationScheme (ProbabilisticMultiLabelFMeasure beta) [])
       recall = 0.675
   in weightedHarmonicMean beta precision recall

+helpMetricParameterMetricsList :: String
+helpMetricParameterMetricsList = intercalate ", " $ map (\s -> (show s) ++ (case extraInfo s of
+                                                                              Just eI -> " (" ++ eI ++ ")"
+                                                                              Nothing -> ""))
+                                                        listOfAvailableEvaluationSchemes
+
 listOfAvailableEvaluationSchemes :: [EvaluationScheme]
 listOfAvailableEvaluationSchemes = map (\m -> EvaluationScheme m []) listOfAvailableMetrics
                                    ++ [
diff --git a/src/GEval/OptionsParser.hs b/src/GEval/OptionsParser.hs
index f2e7566..c870b78 100644
--- a/src/GEval/OptionsParser.hs
+++ b/src/GEval/OptionsParser.hs
@@ -27,7 +27,7 @@ import Data.Monoid ((<>))

 import GEval.Core
 import GEval.EvaluationScheme
-import GEval.MetricsMeta (extraInfo, listOfAvailableEvaluationSchemes, allMetricsDescription)
+import GEval.MetricsMeta (extraInfo, listOfAvailableEvaluationSchemes, allMetricsDescription, helpMetricParameterMetricsList)
 import GEval.Common
 import GEval.CreateChallenge
 import GEval.LineByLine
@@ -247,11 +247,7 @@ metricReader = many $ option auto -- actually `some` should be used inst
   ( long "metric" -- --metric might be in the config.txt file...
     <> short 'm'
     <> metavar "METRIC"
-    <> help ("Metric to be used, e.g.:" ++ intercalate ", " (map
-               (\s -> (show s) ++ (case extraInfo s of
-                                     Just eI -> " (" ++ eI ++ ")"
-                                     Nothing -> ""))
-               listOfAvailableEvaluationSchemes)))
+    <> help ("Metric to be used, e.g.:" ++ helpMetricParameterMetricsList))
 -- RMSE, MSE, MAE, SMAPE, Pearson, Spearman, Accuracy, LogLoss, Likelihood, F-measure (specify as F1, F2, F0.25, etc.), macro F-measure (specify as Macro-F1, Macro-F2, Macro-F0.25, etc.), multi-label F-measure (specify as MultiLabel-F1, MultiLabel-F2, MultiLabel-F0.25, etc.), MultiLabel-Likelihood, MAP, BLEU, GLEU (\"Google GLEU\" not the grammar correction metric), WER, NMI, ClippEU, LogLossHashed, LikelihoodHashed, BIO-F1, BIO-F1-Labels, TokenAccuracy, soft F-measure (specify as Soft-F1, Soft-F2, Soft-F0.25), probabilistic soft F-measure (specify as Probabilistic-Soft-F1, Probabilistic-Soft-F2, Probabilistic-Soft-F0.25) or CharMatch" )
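
The change itself is a pure refactoring: the expression that renders the metric list for the --metric help text is lifted out of the option parser into a named, exported top-level constant, helpMetricParameterMetricsList, so the same text can be reused outside OptionsParser.hs. A minimal self-contained sketch of the same pattern follows; Scheme, extraInfo, and helpList below are hypothetical stand-ins, not GEval's real definitions (those live in GEval.EvaluationScheme and GEval.MetricsMeta):

import Data.List (intercalate)

-- Hypothetical stand-in for GEval's EvaluationScheme; the real type has
-- many more constructors and optional transformations.
data Scheme = BLEU | GLEU | WER deriving Show

-- Hypothetical stand-in for GEval.MetricsMeta.extraInfo: an optional
-- clarifying note attached to some schemes.
extraInfo :: Scheme -> Maybe String
extraInfo GLEU = Just "\"Google GLEU\" not the grammar correction metric"
extraInfo _    = Nothing

-- Same shape as the extracted helper: show each scheme, append its extra
-- info in parentheses when present, then join the results with ", ".
helpList :: [Scheme] -> String
helpList = intercalate ", " . map render
  where
    render s = show s ++ maybe "" (\ei -> " (" ++ ei ++ ")") (extraInfo s)

main :: IO ()
main = putStrLn ("Metric to be used, e.g.: " ++ helpList [BLEU, GLEU, WER])
-- prints: Metric to be used, e.g.: BLEU, GLEU ("Google GLEU" not the grammar correction metric), WER

Giving the expression a name keeps the option parser readable and means the help text and any future call site (e.g. an error message listing valid metrics) share a single definition instead of duplicating the formatting logic.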