diff --git a/README.rst b/README.rst
index d33ad27d..b3c57e07 100644
--- a/README.rst
+++ b/README.rst
@@ -1083,6 +1083,32 @@ test_publish_article.py:
         pass
 
 
+Using Asyncio
+-------------
+
+Scenario functions and step functions can be defined with ``async def``. Async scenario functions have to be
+marked with ``@pytest.mark.asyncio``, which requires the ``pytest-asyncio`` plugin to be installed:
+
+.. code-block:: python
+
+    @pytest.mark.asyncio
+    @scenario('test.feature', 'Launching scenario function')
+    async def test_launching_scenario_function():
+        pass
+
+
+    @given("i have async step")
+    async def async_given():
+        pass
+
+
+    @when("i do async step")
+    async def async_when():
+        pass
+
+
+    @then("i should have async step")
+    async def async_then():
+        pass
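+
+For reference, the corresponding feature file could look like this (the feature name is only illustrative; the
+scenario name and step texts have to match the ones used in the decorators above):
+
+.. code-block:: gherkin
+
+    Feature: Launching scenario function
+
+      Scenario: Launching scenario function
+        Given i have async step
+        When i do async step
+        Then i should have async step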
+
 Hooks
 -----
 
@@ -1112,6 +1138,16 @@ which might be helpful building useful reporting, visualization, etc on top of i
 * pytest_bdd_step_func_lookup_error(request, feature, scenario, step, exception) - Called when step lookup failed
 
 
+Async hooks
+-----------
+
+If you want any of the above hooks to be asynchronous, just define it with ``async def`` instead of ``def``. As with
+async steps, this requires the ``pytest-asyncio`` plugin:
+
+.. code-block:: python
+
+    async def pytest_bdd_before_scenario(request, feature, scenario):
+        pass
+
 Browser testing
 ---------------
 
diff --git a/pytest_bdd/scenario.py b/pytest_bdd/scenario.py
index ba12bcb8..091f3f19 100644
--- a/pytest_bdd/scenario.py
+++ b/pytest_bdd/scenario.py
@@ -26,8 +26,7 @@
 from .feature import Feature, force_unicode, get_features
 from .steps import get_caller_module, get_step_fixture_name, inject_fixture
 from .types import GIVEN
-from .utils import CONFIG_STACK, get_args
-
+from .utils import CONFIG_STACK, get_args, run_coroutines
 
 PYTHON_REPLACE_REGEX = re.compile(r"\W")
 ALPHA_REGEX = re.compile(r"^\d+_*")
@@ -102,7 +101,7 @@ def _execute_step_function(request, scenario, step, step_func):
     """
     kw = dict(request=request, feature=scenario.feature, scenario=scenario, step=step, step_func=step_func)
 
-    request.config.hook.pytest_bdd_before_step(**kw)
+    run_coroutines(*request.config.hook.pytest_bdd_before_step(**kw), request=request)
 
     kw["step_func_args"] = {}
     try:
@@ -110,12 +109,15 @@ def _execute_step_function(request, scenario, step, step_func):
         kwargs = dict((arg, request.getfixturevalue(arg)) for arg in get_args(step_func))
         kw["step_func_args"] = kwargs
 
-        request.config.hook.pytest_bdd_before_step_call(**kw)
+        run_coroutines(*request.config.hook.pytest_bdd_before_step_call(**kw), request=request)
+
         # Execute the step.
-        step_func(**kwargs)
-        request.config.hook.pytest_bdd_after_step(**kw)
+        result_or_coro = step_func(**kwargs)
+        run_coroutines(result_or_coro, request=request)
+
+        run_coroutines(*request.config.hook.pytest_bdd_after_step(**kw), request=request)
     except Exception as exception:
-        request.config.hook.pytest_bdd_step_error(exception=exception, **kw)
+        run_coroutines(*request.config.hook.pytest_bdd_step_error(exception=exception, **kw), request=request)
         raise
 
 
@@ -127,7 +129,10 @@ def _execute_scenario(feature, scenario, request, encoding):
     :param request: request.
     :param encoding: Encoding.
     """
-    request.config.hook.pytest_bdd_before_scenario(request=request, feature=feature, scenario=scenario)
+    run_coroutines(
+        *request.config.hook.pytest_bdd_before_scenario(request=request, feature=feature, scenario=scenario),
+        request=request
+    )
 
     try:
         givens = set()
@@ -136,9 +141,10 @@ def _execute_scenario(feature, scenario, request, encoding):
             try:
                 step_func = _find_step_function(request, step, scenario, encoding=encoding)
             except exceptions.StepDefinitionNotFoundError as exception:
-                request.config.hook.pytest_bdd_step_func_lookup_error(
+                results_or_coros = request.config.hook.pytest_bdd_step_func_lookup_error(
                     request=request, feature=feature, scenario=scenario, step=step, exception=exception
                 )
+                run_coroutines(*results_or_coros, request=request)
                 raise
 
             try:
@@ -152,7 +158,7 @@ def _execute_scenario(feature, scenario, request, encoding):
                         )
                     givens.add(step_func.fixture)
             except exceptions.ScenarioValidationError as exception:
-                request.config.hook.pytest_bdd_step_validation_error(
+                results_or_coros = request.config.hook.pytest_bdd_step_validation_error(
                     request=request,
                     feature=feature,
                     scenario=scenario,
@@ -161,11 +167,15 @@ def _execute_scenario(feature, scenario, request, encoding):
                     exception=exception,
                     step_func_args=dict((arg, request.getfixturevalue(arg)) for arg in get_args(step_func)),
                 )
+                run_coroutines(*results_or_coros, request=request)
                 raise
 
             _execute_step_function(request, scenario, step, step_func)
     finally:
-        request.config.hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario)
+        run_coroutines(
+            *request.config.hook.pytest_bdd_after_scenario(request=request, feature=feature, scenario=scenario),
+            request=request
+        )
 
 
 FakeRequest = collections.namedtuple("FakeRequest", ["module"])
diff --git a/pytest_bdd/utils.py b/pytest_bdd/utils.py
index 879f282b..0f2f6d00 100644
--- a/pytest_bdd/utils.py
+++ b/pytest_bdd/utils.py
@@ -1,7 +1,8 @@
 """Various utility functions."""
-
 import inspect
 
+from _pytest.fixtures import FixtureLookupError
+
 CONFIG_STACK = []
 
 
@@ -31,3 +32,42 @@ def get_parametrize_markers_args(node):
     This function uses that API if it is available otherwise it uses MarkInfo objects.
     """
     return tuple(arg for mark in node.iter_markers("parametrize") for arg in mark.args)
+
+
+def run_coroutines(*results_or_coroutines, request):
+    """
+    Takes the provided coroutines and/or plain function results (which can be of any type) and, for each of them:
+        * if it is a coroutine - runs it using the ``event_loop`` fixture and adds its result to the batch,
+        * if it isn't a coroutine - just adds it to the batch.
+    Then returns the batch of results (or a single result).
+
+    Example usage:
+        >>> def regular_fn(): return 24
+        >>> async def async_fn(): return 42
+        >>>
+        >>> assert run_coroutines(regular_fn(), request=request) == 24
+        >>> assert run_coroutines(async_fn(), request=request) == 42
+        >>> assert run_coroutines(regular_fn(), async_fn(), request=request) == (24, 42)
+
+    :param results_or_coroutines: coroutine(s) to run or plain function result(s) to pass through
+    :param request: request fixture, used to look up the ``event_loop`` fixture
+    :return: a single result if a single coroutine/result was provided, a tuple of results otherwise
+    """
+
+    def run_with_event_loop_fixture(coro):
+        try:
+            event_loop = request.getfixturevalue("event_loop")
+        except FixtureLookupError:
+            raise ValueError("Install pytest-asyncio plugin to run asynchronous steps.")
+
+        return event_loop.run_until_complete(coro)
+
+    results = [
+        run_with_event_loop_fixture(result_or_coro) if inspect.iscoroutine(result_or_coro) else result_or_coro
+        for result_or_coro in results_or_coroutines
+    ]
+
+    if len(results) == 1:
+        return results[0]
+    else:
+        return tuple(results)
diff --git a/requirements-testing.txt b/requirements-testing.txt
index 932a8957..6dd85296 100644
--- a/requirements-testing.txt
+++ b/requirements-testing.txt
@@ -1 +1,6 @@
 mock
+requests
+flask_api
+aiohttp
+pytest-asyncio
+async_generator
diff --git a/setup.py b/setup.py
index bdc9ca07..dcacb019 100755
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@
         "pytest11": ["pytest-bdd = pytest_bdd.plugin"],
         "console_scripts": ["pytest-bdd = pytest_bdd.scripts:main"],
     },
-    tests_require=["tox"],
+    tests_require=["tox", "flask", "requests", "flask_api", "aiohttp", "pytest-asyncio", "async_generator"],
     packages=["pytest_bdd"],
     include_package_data=True,
 )
diff --git a/tests/asyncio/__init__.py b/tests/asyncio/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/asyncio/conftest.py b/tests/asyncio/conftest.py
new file mode 100644
index 00000000..f61bab62
--- /dev/null
+++ b/tests/asyncio/conftest.py
@@ -0,0 +1 @@
+from tests.asyncio.dummy_app import *
diff --git a/tests/asyncio/dummy_app.py b/tests/asyncio/dummy_app.py
new file mode 100644
index 00000000..c16d1969
--- /dev/null
+++ b/tests/asyncio/dummy_app.py
@@ -0,0 +1,144 @@
+import asyncio
+import contextlib
+import time
+from contextlib import contextmanager
+from datetime import datetime, timedelta
+from multiprocessing.context import Process
+
+import aiohttp
+import pytest
+import requests
+from async_generator import yield_, async_generator
+from flask import Flask, jsonify
+from flask import request
+from flask_api.status import HTTP_404_NOT_FOUND, HTTP_200_OK
+
+
+@contextmanager
+def setup_and_teardown_flask_app(app: Flask, host: str, port: int):
+    """
+    Manages the setup and teardown of the provided flask app on the given `host` and `port`.
+
+    During setup the following things are done:
+        * a `/health` endpoint is added to the provided flask app,
+        * the app is launched in a separate process,
+        * the function waits for the flask app to fully launch by repeatedly polling the `/health` endpoint
+            until it returns status code 200.
+
+    Example use of this function in a fixture:
+
+    >>> with setup_and_teardown_flask_app(Flask(__name__), "localhost", 10000):
+    ...     yield
+
+    :param app: app to launch
+    :param host: host on which to launch app
+    :param port: port on which to launch app
+    """
+
+    def wait_for_flask_app_to_be_accessible():
+        timeout = 1
+        end_time = datetime.now() + timedelta(seconds=timeout)
+        response = requests.Response()
+        response.status_code = HTTP_404_NOT_FOUND
+
+        while response.status_code != HTTP_200_OK and datetime.now() < end_time:
+            with contextlib.suppress(requests.exceptions.ConnectionError):
+                response = requests.request("POST", "http://{}:{}/health".format(host, port))
+            time.sleep(0.01)
+
+        fail_message = "Timeout expired: failed to start mock REST API in {} seconds".format(timeout)
+        assert response.status_code == HTTP_200_OK, fail_message
+
+    app.route("/health", methods=["POST"])(lambda: "OK")
+
+    process = Process(target=app.run, args=(host, port))
+    process.start()
+
+    wait_for_flask_app_to_be_accessible()
+    yield
+
+    process.terminate()
+    process.join()
+
+
+def create_server():
+    app = Flask(__name__)
+    app.pre_computation_value = 0
+    app.post_computation_value = 0
+
+    @app.route("/pre-computation-value", methods=["PUT"])
+    def set_pre_computation_value():
+        app.pre_computation_value = request.json["value"]
+        return ""
+
+    @app.route("/pre-computation-value", methods=["GET"])
+    def get_pre_computation_value():
+        return jsonify(app.pre_computation_value)
+
+    @app.route("/post-computation-value", methods=["PUT"])
+    def set_post_computation_value():
+        app.post_computation_value = request.json["value"]
+        return ""
+
+    @app.route("/post-computation-value", methods=["GET"])
+    def get_post_computation_value():
+        return jsonify(app.post_computation_value)
+
+    return app
+
+
+class DummyApp:
+    """
+    Simulates a real application that periodically gets an input value from the server, processes it and posts the result back.
+    """
+
+    def __init__(self, host, port, tick_rate_s):
+        self.host = host
+        self.port = port
+        self.tick_rate_s = tick_rate_s
+        self.stored_value = 0
+
+    async def run(self):
+        await asyncio.gather(self.run_getter(), self.run_poster())
+
+    async def run_getter(self):
+        async with aiohttp.ClientSession() as session:
+            while True:
+                response = await session.get("http://{}:{}/pre-computation-value".format(self.host, self.port))
+                self.stored_value = int(await response.text())
+                await asyncio.sleep(self.tick_rate_s)
+
+    async def run_poster(self):
+        async with aiohttp.ClientSession() as session:
+            while True:
+                await session.put(
+                    "http://{}:{}/post-computation-value".format(self.host, self.port),
+                    json={"value": self.stored_value + 1},
+                )
+                await asyncio.sleep(self.tick_rate_s)
+
+
+@pytest.fixture
+def dummy_server_host():
+    return "localhost"
+
+
+@pytest.fixture
+def launch_dummy_server(dummy_server_host, unused_tcp_port):
+    with setup_and_teardown_flask_app(create_server(), dummy_server_host, unused_tcp_port):
+        yield
+
+
+@pytest.fixture
+def app_tick_interval():
+    return 0.01
+
+
+@pytest.fixture
+@async_generator
+async def launch_dummy_app(event_loop, launch_dummy_server, dummy_server_host, unused_tcp_port, app_tick_interval):
+    app = DummyApp(dummy_server_host, unused_tcp_port, app_tick_interval)
+    task = event_loop.create_task(app.run())
+    await yield_(None)
+    task.cancel()
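+    # Yield control back to the event loop once so the cancellation is actually processed before teardown finishes.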
+    await asyncio.sleep(0)
diff --git a/tests/asyncio/test_async_given_returns_value.feature b/tests/asyncio/test_async_given_returns_value.feature
new file mode 100644
index 00000000..309f6997
--- /dev/null
+++ b/tests/asyncio/test_async_given_returns_value.feature
@@ -0,0 +1,9 @@
+Feature: Async given is a fixture and its value is properly returned
+
+  Scenario: Async given shadows fixture
+    Given i have given that shadows fixture with value of 42
+    Then shadowed fixture value should be equal to 42
+
+  Scenario: Async given is a fixture
+    Given i have given that is a fixture with value of 42
+    Then value of given as a fixture should be equal to 42
diff --git a/tests/asyncio/test_async_given_returns_value.py b/tests/asyncio/test_async_given_returns_value.py
new file mode 100644
index 00000000..e30695df
--- /dev/null
+++ b/tests/asyncio/test_async_given_returns_value.py
@@ -0,0 +1,30 @@
+import pytest
+
+from pytest_bdd import given, parsers, then, scenarios
+
+scenarios("test_async_given_returns_value.feature")
+
+
+@pytest.fixture
+def my_value():
+    return 0
+
+
+@given(parsers.parse("i have given that shadows fixture with value of {value:d}"), target_fixture="my_value")
+async def i_have_given_that_shadows_fixture_with_value_of(value):
+    return value
+
+
+@given(parsers.parse("i have given that is a fixture with value of {value:d}"))
+async def i_have_given_that_is_a_fixture_with_value_of(value):
+    return value
+
+
+@then(parsers.parse("shadowed fixture value should be equal to {value:d}"))
+async def my_fixture_value_should_be_equal_to(value, my_value):
+    assert value == my_value
+
+
+@then(parsers.parse("value of given as a fixture should be equal to {value:d}"))
+async def value_of_given_as_a_fixture_should_be_equal_to(value, i_have_given_that_is_a_fixture_with_value_of):
+    assert value == i_have_given_that_is_a_fixture_with_value_of
diff --git a/tests/asyncio/test_async_hooks.py b/tests/asyncio/test_async_hooks.py
new file mode 100644
index 00000000..efa510ae
--- /dev/null
+++ b/tests/asyncio/test_async_hooks.py
@@ -0,0 +1,166 @@
+import textwrap
+
+import pytest
+
+
+@pytest.fixture
+def feature_file(testdir):
+    testdir.makefile(
+        ".feature",
+        test=textwrap.dedent(
+            """
+            Feature: Async hooks are being launched
+
+                Scenario: Launching async hooks
+                    Given i have step
+                    And i have another step
+            """
+        ),
+    )
+
+
+@pytest.fixture
+def hook_file(testdir):
+    testdir.makeconftest(
+        textwrap.dedent(
+            """
+            async def pytest_bdd_before_scenario(request, feature, scenario):
+                print("\\npytest_bdd_before_scenario")
+
+            async def pytest_bdd_after_scenario(request, feature, scenario):
+                print("\\npytest_bdd_after_scenario")
+
+            async def pytest_bdd_before_step(request, feature, scenario, step, step_func):
+                print("\\npytest_bdd_before_step")
+
+            async def pytest_bdd_before_step_call(request, feature, scenario, step, step_func, step_func_args):
+                print("\\npytest_bdd_before_step_call")
+
+            async def pytest_bdd_after_step(request, feature, scenario, step, step_func, step_func_args):
+                print("\\npytest_bdd_after_step")
+
+            async def pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args, exception):
+                print("\\npytest_bdd_step_error")
+
+            async def pytest_bdd_step_validation_error(request, feature, scenario, step, step_func, step_func_args,
+                                                       exception):
+                print("\\npytest_bdd_step_validation_error")
+
+            async def pytest_bdd_step_func_lookup_error(request, feature, scenario, step, exception):
+                print("\\npytest_bdd_step_func_lookup_error")
+            """
+        )
+    )
+
+
+def test_async_non_error_hooks_are_being_launched(feature_file, hook_file, testdir):
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario, given
+
+            @pytest.mark.asyncio
+            @scenario('test.feature', 'Launching async hooks')
+            def test_launching_async_hooks():
+                pass
+
+            @given("i have step")
+            def i_have_step():
+                pass
+            """
+        )
+    )
+
+    result = testdir.runpytest("-s")
+
+    assert result.stdout.lines.count("pytest_bdd_before_scenario") == 1
+    assert result.stdout.lines.count("pytest_bdd_after_scenario") == 1
+    assert result.stdout.lines.count("pytest_bdd_before_step") == 1
+    assert result.stdout.lines.count("pytest_bdd_before_step_call") == 1
+    assert result.stdout.lines.count("pytest_bdd_after_step") == 1
+
+
+def test_async_step_func_lookup_error_hook_is_being_launched(feature_file, hook_file, testdir):
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario, given
+
+            @pytest.mark.asyncio
+            @scenario('test.feature', 'Launching async hooks')
+            def test_launching_async_hooks():
+                pass
+            """
+        )
+    )
+
+    result = testdir.runpytest("-s")
+
+    assert result.stdout.lines.count("pytest_bdd_step_func_lookup_error") == 1
+
+
+def test_async_step_error_hook_is_being_launched(feature_file, hook_file, testdir):
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario, given
+
+            @pytest.mark.asyncio
+            @scenario('test.feature', 'Launching async hooks')
+            def test_launching_async_hooks():
+                pass
+
+            @given("i have step")
+            def i_have_step():
+                raise Exception()
+            """
+        )
+    )
+
+    result = testdir.runpytest("-s")
+
+    assert result.stdout.lines.count("pytest_bdd_step_error") == 1
+
+
+def test_async_step_validation_error_hook_is_being_launched(hook_file, testdir):
+    testdir.makefile(
+        ".feature",
+        test=textwrap.dedent(
+            """
+            Feature: Async hooks are being launched
+
+                Scenario: Launching async hooks
+                    Given i have step
+                    And i have another step
+            """
+        ),
+    )
+
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario, given
+
+            @pytest.mark.asyncio
+            @scenario('test.feature', 'Launching async hooks')
+            def test_launching_async_hooks():
+                pass
+
+            @given("i have step")
+            def i_have_step():
+                pass
+
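+            # The function name is reused on purpose: both given steps then resolve to the same fixture,
+            # which triggers a step validation error.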
+            @given("i have another step")
+            def i_have_step():
+                pass
+            """
+        )
+    )
+
+    result = testdir.runpytest("-s")
+
+    assert result.stdout.lines.count("pytest_bdd_step_validation_error") == 1
diff --git a/tests/asyncio/test_async_scenario_function.py b/tests/asyncio/test_async_scenario_function.py
new file mode 100644
index 00000000..7718380c
--- /dev/null
+++ b/tests/asyncio/test_async_scenario_function.py
@@ -0,0 +1,61 @@
+import textwrap
+
+import pytest
+
+
+@pytest.fixture
+def feature_file(testdir):
+    testdir.makefile(
+        ".feature",
+        test=textwrap.dedent(
+            """
+            Feature: Async scenario function is being launched
+
+                Scenario: Launching scenario function
+            """
+        ),
+    )
+
+
+def test_scenario_function_marked_with_async_passes(feature_file, testdir):
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario
+
+            @pytest.mark.asyncio
+            @scenario('test.feature', 'Launching scenario function')
+            async def test_launching_scenario_function():
+                pass
+            """
+        )
+    )
+
+    result = testdir.runpytest()
+    result.assert_outcomes(passed=1)
+
+
+PYTEST_VERSION = tuple([int(i) for i in pytest.__version__.split(".")])
+
+
+@pytest.mark.skipif(
+    PYTEST_VERSION < (5, 1, 0),
+    reason="Async functions not marked as @pytest.mark.asyncio are silently passing on pytest < 5.1.0",
+)
+def test_scenario_function_not_marked_with_async_fails(feature_file, testdir):
+    testdir.makepyfile(
+        textwrap.dedent(
+            """
+            import pytest
+            from pytest_bdd import scenario
+
+            @scenario('test.feature', 'Launching scenario function')
+            async def test_launching_scenario_function():
+                pass
+            """
+        )
+    )
+
+    result = testdir.runpytest()
+    result.assert_outcomes(failed=1)
diff --git a/tests/asyncio/test_async_steps.feature b/tests/asyncio/test_async_steps.feature
new file mode 100644
index 00000000..8d170992
--- /dev/null
+++ b/tests/asyncio/test_async_steps.feature
@@ -0,0 +1,16 @@
+Feature: Async steps
+
+  Scenario: Async steps are actually executed
+    Given i have async step
+    When i do async step
+    Then i should have async step
+
+  Scenario: Async steps are executed along with regular steps
+    Given i have async step
+    And i have regular step
+
+    When i do async step
+    And i do regular step
+
+    Then i should have async step
+    And i should have regular step
diff --git a/tests/asyncio/test_async_steps.py b/tests/asyncio/test_async_steps.py
new file mode 100644
index 00000000..2b58105a
--- /dev/null
+++ b/tests/asyncio/test_async_steps.py
@@ -0,0 +1,48 @@
+import pytest
+
+from pytest_bdd import then, when, given, scenario
+
+
+@pytest.fixture
+def test_value():
+    return {"value": 0}
+
+
+@scenario("test_async_steps.feature", "Async steps are actually executed")
+def test_async_steps_do_work(test_value):
+    assert test_value["value"] == 3
+
+
+@scenario("test_async_steps.feature", "Async steps are executed along with regular steps")
+def test_async_steps_work_with_regular_ones(test_value):
+    assert test_value["value"] == 6
+
+
+@given("i have async step")
+async def async_step(test_value):
+    test_value["value"] += 1
+
+
+@given("i have regular step")
+def i_have_regular_step(test_value):
+    test_value["value"] += 1
+
+
+@when("i do async step")
+async def i_do_async_step(test_value):
+    test_value["value"] += 1
+
+
+@when("i do regular step")
+def i_do_regular_step(test_value):
+    test_value["value"] += 1
+
+
+@then("i should have async step")
+async def i_should_have_async_step(test_value):
+    test_value["value"] += 1
+
+
+@then("i should have regular step")
+def i_should_have_regular_step(test_value):
+    test_value["value"] += 1
diff --git a/tests/asyncio/test_launching_app_in_background.feature b/tests/asyncio/test_launching_app_in_background.feature
new file mode 100644
index 00000000..4bd7f7fa
--- /dev/null
+++ b/tests/asyncio/test_launching_app_in_background.feature
@@ -0,0 +1,6 @@
+Feature: Launching application in async task
+
+  Scenario: App is running during whole scenario
+    Given i have launched app
+    When i post input variable to have value of 3
+    Then output value should be equal to 4
diff --git a/tests/asyncio/test_launching_app_in_background.py b/tests/asyncio/test_launching_app_in_background.py
new file mode 100644
index 00000000..c0e73b78
--- /dev/null
+++ b/tests/asyncio/test_launching_app_in_background.py
@@ -0,0 +1,41 @@
+import asyncio
+from datetime import datetime, timedelta
+
+import aiohttp
+
+from pytest_bdd import given, when, then, scenarios, parsers
+
+scenarios("test_launching_app_in_background.feature")
+
+
+@given("i have launched app")
+async def i_have_launched_app(launch_dummy_app):
+    pass
+
+
+@when(parsers.parse("i post input variable to have value of {value:d}"))
+async def i_post_input_variable(value, dummy_server_host, unused_tcp_port):
+    async with aiohttp.ClientSession() as session:
+        endpoint = "http://{}:{}/pre-computation-value".format(dummy_server_host, unused_tcp_port)
+        await session.put(endpoint, json={"value": value})
+
+
+@then(parsers.parse("output value should be equal to {expected_value:d}"))
+async def output_value_should_be_equal_to(expected_value, dummy_server_host, unused_tcp_port, app_tick_interval):
+    async with aiohttp.ClientSession() as session:
+        timeout = app_tick_interval * 10
+        end_time = datetime.now() + timedelta(seconds=timeout)
+
+        while datetime.now() < end_time:
+            url = "http://{}:{}/post-computation-value".format(dummy_server_host, unused_tcp_port)
+            response = await session.get(url)
+            output_value = int(await response.text())
+
+            if output_value == expected_value:
+                break
+
+            await asyncio.sleep(app_tick_interval)
+        else:
+            raise AssertionError(
+                "Output value of {} isn't equal to expected value of {}.".format(output_value, expected_value)
+            )
diff --git a/tests/asyncio/test_utils.py b/tests/asyncio/test_utils.py
new file mode 100644
index 00000000..6e1afd55
--- /dev/null
+++ b/tests/asyncio/test_utils.py
@@ -0,0 +1,31 @@
+import pytest
+
+from pytest_bdd.utils import run_coroutines
+
+
+def regular_fn():
+    return 24
+
+
+async def async_fn():
+    return 42
+
+
+@pytest.mark.parametrize(
+    ["functions_to_execute", "expected_results"],
+    [
+        (regular_fn(), 24),
+        (async_fn(), 42),
+        ((regular_fn(), regular_fn(), regular_fn()), (24, 24, 24)),
+        ((async_fn(), async_fn(), async_fn()), (42, 42, 42)),
+        ((regular_fn(), async_fn()), (24, 42)),
+    ],
+    ids=["single regular fn", "single async fn", "many regular fns", "many async fns", "mixed fns"],
+)
+def test_run_coroutines(request, functions_to_execute, expected_results):
+    if isinstance(functions_to_execute, tuple):
+        actual_results = run_coroutines(*functions_to_execute, request=request)
+    else:
+        actual_results = run_coroutines(functions_to_execute, request=request)
+
+    assert actual_results == expected_results