From 0ad3a2b2a9496e1d1c6882d4beb0f2d83b6571af Mon Sep 17 00:00:00 2001 From: Shafiq Jetha <1066864+fasterinnerlooper@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:40:39 -0700 Subject: [PATCH] fix: Make language a list in evaluate app, added comments to evaluate description Co-authored-by: Konstantin Chernyshev <38007247+k4black@users.noreply.github.com> --- evaluate_app/codebleu.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/evaluate_app/codebleu.py b/evaluate_app/codebleu.py index 927b73f..7af0ccd 100644 --- a/evaluate_app/codebleu.py +++ b/evaluate_app/codebleu.py @@ -41,7 +41,7 @@ should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. - language: programming language in ['java','js','c_sharp','php','c','python','cpp']. + language: programming language in ['java','js','c_sharp','php','c','python','cpp']. Please note that, due to the way Datasets works, the number of entries in the language array must match the number of entries in the predictions and references arrays, but only the first value from the languages array will be used. This means that you will not be able to compute a metric for different languages at the same time, but must do them as sequential calls to CodeBleu. weights: tuple of 4 floats to use as weights for scores. Defaults to (0.25, 0.25, 0.25, 0.25). 
Returns: codebleu: resulting `CodeBLEU` score, @@ -53,7 +53,7 @@ >>> metric = evaluate.load("k4black/codebleu") >>> ref = "def sum ( first , second ) :\n return second + first" >>> pred = "def add ( a , b ) :\n return a + b" - >>> results = metric.compute(references=[ref], predictions=[pred], language="python") + >>> results = metric.compute(references=[ref], predictions=[pred], language=["python"]) >>> print(results) """ @@ -76,7 +76,7 @@ def _info(self): { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"), - "lang": datasets.Value("string"), + # "lang": datasets.Value("string"), # "weights": datasets.Value("string"), # "tokenizer": datasets.Value("string"), } @@ -85,7 +85,7 @@ def _info(self): { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), - "lang": datasets.Value("string"), + # "lang": datasets.Value("string"), # "weights": datasets.Value("string"), # "tokenizer": datasets.Value("string"), } @@ -113,7 +113,7 @@ def _compute(self, predictions, references, lang, weights=(0.25, 0.25, 0.25, 0.2 return self.codebleu_package.calc_codebleu( references=references, predictions=predictions, - lang=lang, + lang=lang[0], weights=weights, tokenizer=tokenizer, )