-
Notifications
You must be signed in to change notification settings - Fork 5
/
evaluator.py
82 lines (67 loc) · 3.05 KB
/
evaluator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import pandas as pd
import numpy as np
class AIcrowdEvaluator:
    """Evaluator used by the AIcrowd platform to score submissions.

    NOTE: the class name must be exactly ``AIcrowdEvaluator`` — the
    platform looks it up by this name.
    """

    def __init__(self, ground_truth_path, **kwargs):
        """
        Parameters
        ----------
        ground_truth_path : str
            Path to the ground-truth file used to score submissions.
        **kwargs
            Extra configuration forwarded by the platform; currently unused.
        """
        self.ground_truth_path = ground_truth_path

    def _evaluate(self, client_payload, _context=None):
        """Score a single submission and return a result object.

        Parameters
        ----------
        client_payload : dict
            Must contain (at least) the following keys:
            - ``submission_file_path`` : local file path of the submitted file
            - ``aicrowd_submission_id`` : unique id of this submission
            - ``aicrowd_participant_id`` : unique id of the participant/team
              (if enabled)
        _context : dict, optional
            Platform-supplied context; unused here. Defaults to ``None``
            rather than ``{}`` to avoid the mutable-default-argument pitfall
            (a ``{}`` default is one shared dict across all calls).

        Returns
        -------
        dict
            Result object containing ``score`` and ``score_secondary``
            (placeholder random values in this template implementation).
        """
        submission_file_path = client_payload["submission_file_path"]
        aicrowd_submission_id = client_payload["aicrowd_submission_id"]
        aicrowd_participant_uid = client_payload["aicrowd_participant_id"]

        # Read the submission — swap in your preferred loader if not CSV.
        submission = pd.read_csv(submission_file_path)

        # Compute your real primary/secondary scores here.  To report an
        # error back to the user, simply
        #     raise Exception("YOUR-CUSTOM-ERROR")
        # You are encouraged to add as many validations as possible to
        # provide meaningful feedback to your users.
        _result_object = {
            "score": np.random.random(),
            "score_secondary": np.random.random(),
        }

        # To show media on the challenge leaderboard, save files under
        # '/tmp/<filename>' and record their paths on the result object:
        #   _result_object["media_image_path"] = '/tmp/submission-image.png'
        #   _result_object["media_video_path"] = '/tmp/submission-video.mp4'
        #   _result_object["media_video_thumb_path"] = '/tmp/submission-video-small.mp4'
        #     (small video shown on the leaderboard)

        assert "score" in _result_object
        assert "score_secondary" in _result_object
        return _result_object
if __name__ == "__main__":
    # Assume the ground truth is a CSV at data/ground_truth.csv and a
    # sample submission is available at data/sample_submission.csv.
    ground_truth_path = "data/ground_truth.csv"

    _client_payload = {
        "submission_file_path": "data/sample_submission.csv",
        "aicrowd_submission_id": 1234,
        "aicrowd_participant_id": 1234,
    }

    # Dummy evaluation context.
    _context = {}

    # Build the evaluator and score the sample submission.
    aicrowd_evaluator = AIcrowdEvaluator(ground_truth_path)
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    print(result)