# tests/test_submissions.py (from a fork of Cloud-CV/evalai-cli)
import json
import responses

from click.testing import CliRunner
from datetime import datetime
from dateutil import tz

from evalai.challenges import challenge
from evalai.submissions import submission
from tests.data import submission_response
from evalai.utils.config import API_HOST_URL
from evalai.utils.urls import URLS

from .base import BaseTestClass
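

# The `responses` library intercepts the HTTP calls registered in each
# `setup`, so these tests exercise the CLI without a live EvalAI server.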
class TestGetSubmissionDetails(BaseTestClass):

    def setup(self):
        self.submission = json.loads(submission_response.submission_result)
        url = "{}{}"
        # Mock the GET endpoint that returns details for submission ID 9.
        responses.add(responses.GET,
                      url.format(API_HOST_URL, URLS.get_submission.value).format("9"),
                      json=self.submission, status=200)

    @responses.activate
    def test_display_submission_details(self):
        team_title = "\n{}".format(self.submission['participant_team_name'])
        sid = "Submission ID: {}\n".format(str(self.submission['id']))
        team = "{} {}".format(team_title, sid)
        status = "\nSubmission Status : {}\n".format(self.submission['status'])
        execution_time = "\nExecution Time (sec) : {}\n".format(self.submission['execution_time'])
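        # The CLI prints `submitted_at` in the local timezone, so the expected
        # output converts the UTC timestamp from the mocked API response.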
        date = datetime.strptime(self.submission['submitted_at'], "%Y-%m-%dT%H:%M:%S.%fZ")
        from_zone = tz.tzutc()
        to_zone = tz.tzlocal()
        date = date.replace(tzinfo=from_zone)
        converted_date = date.astimezone(to_zone)
        date = converted_date.strftime('%D %r')
        submitted_at = "\nSubmitted At : {}\n".format(date)
        submission_data = "{}{}{}{}\n".format(team, status, execution_time, submitted_at)

        runner = CliRunner()
        result = runner.invoke(submission, ['9'])
        response = result.output
        assert response == submission_data

    @responses.activate
    def test_display_submission_details_with_a_string_argument(self):
        expected = ("Usage: submission [OPTIONS] SUBMISSION COMMAND [ARGS]...\n"
                    "\nError: Invalid value for \"SUBMISSION\": two is not a valid integer\n")
        runner = CliRunner()
        result = runner.invoke(submission, ['two'])
        response = result.output
        assert response == expected

    @responses.activate
    def test_display_submission_details_with_no_argument(self):
        expected = ("Usage: submission [OPTIONS] SUBMISSION COMMAND [ARGS]...\n"
                    "\nError: Missing argument \"SUBMISSION\".\n")
        runner = CliRunner()
        result = runner.invoke(submission)
        response = result.output
        assert response == expected
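

# Tests for `evalai challenge <challenge_id> phase <phase_id> submit --file <path>`.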
class TestMakeSubmission(BaseTestClass):

    def setup(self):
        self.submission = json.loads(submission_response.submission_result)
        url = "{}{}"
        # Mock the POST endpoint for submitting to challenge 1, phase 2.
        responses.add(responses.POST,
                      url.format(API_HOST_URL, URLS.make_submission.value).format("1", "2"),
                      json=self.submission, status=200)

    @responses.activate
    def test_make_submission_when_file_is_not_valid(self):
        expected = ("Usage: challenge phase submit [OPTIONS]\n"
                    "\nError: Invalid value for \"--file\": Could not open file: file: "
                    "No such file or directory\n")
        runner = CliRunner()
        result = runner.invoke(challenge, ['1', 'phase', '2', 'submit', '--file', 'file'])
        response = result.output
        assert response == expected

    @responses.activate
    def test_make_submission_when_file_is_valid_without_metadata(self):
        expected = ("Your file {} with the ID {} is successfully submitted.\n\n"
                    "You can use `evalai submission {}` to view this "
                    "submission's status.").format("test_file.txt", "9", "9")
        expected = "Do you want to include the Submission Details? [y/N]: N\n\n{}".format(expected)
        runner = CliRunner()
        # Create a dummy submission file inside an isolated temporary directory.
        with runner.isolated_filesystem():
            with open('test_file.txt', 'w') as f:
                f.write('1 2 3 4 5 6')
            result = runner.invoke(challenge, ['1', 'phase', '2', 'submit', '--file', 'test_file.txt'],
                                   input="N")
            assert result.exit_code == 0
            assert result.output.strip() == expected

    @responses.activate
    def test_make_submission_when_file_is_valid_with_metadata(self):
        expected = "Do you want to include the Submission Details? [y/N]: Y"
        expected = "{}\n{}".format(expected, ("Method Name []: Test\nMethod Description []: "
                                              "Test\nProject URL []: Test\nPublication URL []: Test\n"))
        expected = "{}\n{}".format(expected, ("Your file {} with the ID {} is successfully submitted.\n\n"
                                              "You can use `evalai submission {}` to view this "
                                              "submission's status.").format("test_file.txt", "9", "9"))
        runner = CliRunner()
        with runner.isolated_filesystem():
            with open('test_file.txt', 'w') as f:
                f.write('1 2 3 4 5 6')
            # `input` answers the confirmation prompt and the four metadata prompts.
            result = runner.invoke(challenge, ['1', 'phase', '2', 'submit', '--file', "test_file.txt"],
                                   input="Y\nTest\nTest\nTest\nTest")
            assert result.exit_code == 0
            assert result.output.strip() == expected