From 41e23e1cec9de4030c72e7bf4e78e058deddf35b Mon Sep 17 00:00:00 2001
From: Adarsh S <4darshofficial@gmail.com>
Date: Wed, 22 Aug 2018 21:33:07 +0530
Subject: [PATCH] Add minor fixes & fix Python 2 compatibility issues (#95)

* Make submission metadata optional
* Fix server error in leaderboards
* Fix utf-8 error
* Fix Python 2 compatibility issue
---
 evalai/challenges.py        | 11 ++++++-----
 evalai/utils/challenges.py  |  4 +++-
 evalai/utils/common.py      |  2 +-
 evalai/utils/submissions.py |  3 ++-
 setup.py                    |  4 +++-
 tests/test_challenges.py    |  1 +
 tests/test_submissions.py   |  4 ++--
 7 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/evalai/challenges.py b/evalai/challenges.py
index fd2a463c2..b95f9ac96 100644
--- a/evalai/challenges.py
+++ b/evalai/challenges.py
@@ -202,11 +202,12 @@ def submit(ctx, file):
     """
     submission_metadata = {}
     if click.confirm('Do you want to include the Submission Details?'):
-        submission_metadata = {}
-        submission_metadata["method_name"] = click.prompt(style('Method Name', fg="yellow"), type=str)
-        submission_metadata["method_description"] = click.prompt(style('Method Description', fg="yellow"), type=str)
-        submission_metadata["project_url"] = click.prompt(style('Project URL', fg="yellow"), type=str)
-        submission_metadata["publication_url"] = click.prompt(style('Publication URL', fg="yellow"), type=str)
+        submission_metadata["method_name"] = click.prompt(style('Method Name', fg="yellow"), type=str, default="")
+        submission_metadata["method_description"] = click.prompt(style('Method Description', fg="yellow"),
+                                                                 type=str, default="")
+        submission_metadata["project_url"] = click.prompt(style('Project URL', fg="yellow"), type=str, default="")
+        submission_metadata["publication_url"] = click.prompt(style('Publication URL', fg="yellow"),
+                                                              type=str, default="")
     make_submission(ctx.challenge_id, ctx.phase_id, file, submission_metadata)
 
 
diff --git a/evalai/utils/challenges.py b/evalai/utils/challenges.py
index 8bb16db71..7a385aa16 100644
--- a/evalai/utils/challenges.py
+++ b/evalai/utils/challenges.py
@@ -439,6 +439,7 @@ def pretty_print_leaderboard_data(attributes, results):
     """
     leaderboard_table = BeautifulTable(max_width=150)
     attributes = ["Rank", "Participant Team"] + attributes + ["Last Submitted"]
+    attributes = list(map(lambda item: str(item), attributes))
     leaderboard_table.column_headers = attributes
 
     for rank, result in enumerate(results, start=1):
@@ -465,9 +466,10 @@ def display_leaderboard(challenge_id, phase_split_id):
     except requests.exceptions.HTTPError as err:
         if (response.status_code in EVALAI_ERROR_CODES):
             validate_token(response.json())
-            echo(style("Error: {}".format(response.json()["error"], fg="red", bold=True)))
+            echo(style("Error: {}".format(response.json()["error"]), fg="red", bold=True))
         else:
             echo(err)
+        sys.exit(1)
     except requests.exceptions.RequestException as err:
         echo(style("\nCould not establish a connection to EvalAI."
" Please check the Host URL.\n", bold=True, fg="red")) diff --git a/evalai/utils/common.py b/evalai/utils/common.py index b2a2c347d..c76390b93 100644 --- a/evalai/utils/common.py +++ b/evalai/utils/common.py @@ -66,4 +66,4 @@ def clean_data(data): """ data = BeautifulSoup(data, "lxml").text.strip() data = ' '.join(data.split()) - return data + data.encode("utf-8") diff --git a/evalai/utils/submissions.py b/evalai/utils/submissions.py index 0ba4da90e..5fbfdb099 100644 --- a/evalai/utils/submissions.py +++ b/evalai/utils/submissions.py @@ -24,8 +24,9 @@ def make_submission(challenge_id, phase_id, file, submission_metadata={}): input_file = {'input_file': file} data = { 'status': 'submitting', - **submission_metadata, } + data = dict(data, **submission_metadata) + try: response = requests.post( url, diff --git a/setup.py b/setup.py index 90023c19b..6985e55e9 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,13 @@ #!/usr/bin/env python +import io + from setuptools import setup, find_packages PROJECT = 'evalai' -with open('README.md', encoding='utf-8') as f: +with io.open('README.md', encoding='utf-8') as f: long_description = f.read() setup( diff --git a/tests/test_challenges.py b/tests/test_challenges.py index 47ff7a12a..e2f745a3d 100644 --- a/tests/test_challenges.py +++ b/tests/test_challenges.py @@ -613,6 +613,7 @@ def test_display_leaderboard(self): table = BeautifulTable(max_width=150) attributes = ["Rank", "Participant Team"] + attributes + ["Last Submitted"] + attributes = list(map(lambda item: str(item), attributes)) table.column_headers = attributes for rank, result in enumerate(self.leaderboard, start=1): diff --git a/tests/test_submissions.py b/tests/test_submissions.py index ea4beefc6..bb4c72a3c 100644 --- a/tests/test_submissions.py +++ b/tests/test_submissions.py @@ -105,8 +105,8 @@ def test_make_submission_when_file_is_valid_without_metadata(self): @responses.activate def test_make_submission_when_file_is_valid_with_metadata(self): expected = "Do you want to include the Submission Details? [y/N]: Y" - expected = "{}\n{}".format(expected, ("Method Name: Test\nMethod Description: " - "Test\nProject URL: Test\nPublication URL: Test\n")) + expected = "{}\n{}".format(expected, ("Method Name []: Test\nMethod Description []: " + "Test\nProject URL []: Test\nPublication URL []: Test\n")) expected = "{}\n{}".format(expected, ("Your file {} with the ID {} is successfully submitted.\n\n" "You can use `evalai submission {}` to view this " "submission's status.").format("test_file.txt", "9", "9"))