Add helper function to options
This commit is contained in:
parent
00addc5620
commit
fd12a55bcd
@@ -1,5 +1,5 @@
|
||||
name: geval
|
||||
version: 1.21.0.0
|
||||
version: 1.21.1.0
|
||||
synopsis: Machine learning evaluation tools
|
||||
description: Please see README.md
|
||||
homepage: http://github.com/name/project
|
||||
|
@@ -9,7 +9,8 @@ module GEval.MetricsMeta
|
||||
getEvaluationSchemeDescription,
|
||||
outContents,
|
||||
expectedScore,
|
||||
allMetricsDescription)
|
||||
allMetricsDescription,
|
||||
helpMetricParameterMetricsList)
|
||||
where
|
||||
|
||||
import GEval.Common
|
||||
@@ -146,6 +147,12 @@ expectedScore (EvaluationScheme (ProbabilisticMultiLabelFMeasure beta) [])
|
||||
recall = 0.675
|
||||
in weightedHarmonicMean beta precision recall
|
||||
|
||||
-- | Human-readable, comma-separated listing of every available evaluation
-- scheme, with each scheme's extra info (when present) appended in
-- parentheses. Intended for the @--metric@ option's help text.
helpMetricParameterMetricsList :: String
helpMetricParameterMetricsList =
  intercalate ", " (map describeScheme listOfAvailableEvaluationSchemes)
  where
    -- Render one scheme as "NAME" or "NAME (extra info)".
    describeScheme scheme =
      show scheme ++ maybe "" (\info -> " (" ++ info ++ ")") (extraInfo scheme)
|
||||
|
||||
listOfAvailableEvaluationSchemes :: [EvaluationScheme]
|
||||
listOfAvailableEvaluationSchemes = map (\m -> EvaluationScheme m []) listOfAvailableMetrics
|
||||
++ [
|
||||
|
@@ -27,7 +27,7 @@ import Data.Monoid ((<>))
|
||||
|
||||
import GEval.Core
|
||||
import GEval.EvaluationScheme
|
||||
import GEval.MetricsMeta (extraInfo, listOfAvailableEvaluationSchemes, allMetricsDescription)
|
||||
import GEval.MetricsMeta (extraInfo, listOfAvailableEvaluationSchemes, allMetricsDescription, helpMetricParameterMetricsList)
|
||||
import GEval.Common
|
||||
import GEval.CreateChallenge
|
||||
import GEval.LineByLine
|
||||
@@ -247,11 +247,7 @@ metricReader = many $ option auto -- actually `some` should be used instead
|
||||
( long "metric" -- --metric might be in the config.txt file...
|
||||
<> short 'm'
|
||||
<> metavar "METRIC"
|
||||
<> help ("Metric to be used, e.g.:" ++ intercalate ", " (map
|
||||
(\s -> (show s) ++ (case extraInfo s of
|
||||
Just eI -> " (" ++ eI ++ ")"
|
||||
Nothing -> ""))
|
||||
listOfAvailableEvaluationSchemes)))
|
||||
<> help ("Metric to be used, e.g.:" ++ helpMetricParameterMetricsList))
|
||||
|
||||
|
||||
-- RMSE, MSE, MAE, SMAPE, Pearson, Spearman, Accuracy, LogLoss, Likelihood, F-measure (specify as F1, F2, F0.25, etc.), macro F-measure (specify as Macro-F1, Macro-F2, Macro-F0.25, etc.), multi-label F-measure (specify as MultiLabel-F1, MultiLabel-F2, MultiLabel-F0.25, etc.), MultiLabel-Likelihood, MAP, BLEU, GLEU (\"Google GLEU\" not the grammar correction metric), WER, NMI, ClippEU, LogLossHashed, LikelihoodHashed, BIO-F1, BIO-F1-Labels, TokenAccuracy, soft F-measure (specify as Soft-F1, Soft-F2, Soft-F0.25), probabilistic soft F-measure (specify as Probabilistic-Soft-F1, Probabilistic-Soft-F2, Probabilistic-Soft-F0.25) or CharMatch" )
|
||||
|
Loading…
Reference in New Issue
Block a user