diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index dcc9411e..6c783ed4 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -38,6 +38,8 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: "*"
+ - name: Install pandoc
+ uses: r-lib/actions/setup-pandoc@v2
- name: Install PyPi dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 0c82d7f1..cb227085 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -16,6 +16,8 @@ jobs:
- uses: actions/setup-python@v4
with:
python-version: "3.13"
+ - name: Install pandoc
+ uses: r-lib/actions/setup-pandoc@v2
- name: Install dependencies
run: |
python -m pip install --upgrade pip build twine
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b5b14f5b..486198b6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,8 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
---
+default_language_version:
+ python: python3.12
repos:
- repo: https://github.com/psf/black
rev: 24.10.0
@@ -19,7 +21,9 @@ repos:
rev: v5.0.0
hooks:
- id: trailing-whitespace
+ exclude: "^.*\\.rst$"
- id: end-of-file-fixer
+ exclude: "^.*\\.rst$"
- id: check-yaml
- id: check-added-large-files
- id: check-toml
@@ -60,4 +64,9 @@ repos:
language: python
types: [python]
pass_filenames: false
- additional_dependencies: ['docopt-ng', 'pycmarkgfm']
+ additional_dependencies:
+ - 'docopt-ng'
+ - 'pandoc'
+ - 'panflute'
+ - 'pycmarkgfm'
+ - 'pypandoc'
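
The ``pandoc``/``pypandoc``/``panflute`` hook dependencies added above appear to support a documentation step that converts the Markdown feature files into the reStructuredText pages introduced later in this diff. A rough, hypothetical sketch of that kind of conversion (the function name, paths, and the ``gfm``/``rst`` format choices are illustrative assumptions, not taken from the hook itself):

.. code:: python

   import pypandoc  # needs the pandoc binary, hence the setup-pandoc steps in the workflows

   def feature_md_to_rst(src: str, dst: str) -> None:
       """Convert one Markdown feature file to reStructuredText."""
       rst = pypandoc.convert_file(src, to="rst", format="gfm")
       with open(dst, "w", encoding="utf-8") as out:
           out.write(rst)

   # Hypothetical usage; the source path exists in the repository, the target mirrors docs/features/.
   feature_md_to_rst(
       "features/Tutorial/Launch.feature.md",
       "docs/features/Tutorial/Launch.feature.rst",
   )
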
diff --git a/docs/conf.py b/docs/conf.py
index ce85f37e..760bdbfe 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -131,6 +131,9 @@
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
+html_css_files = [
+ "css/custom.css",
+]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
diff --git a/docs/features.rst b/docs/features.rst
deleted file mode 100644
index 38af4566..00000000
--- a/docs/features.rst
+++ /dev/null
@@ -1,173 +0,0 @@
-Features
-========
-
-.. NOTE:: Features below are part of end-to-end test suite; You always could find most specific
- use cases of **pytest-bdd-ng** by investigation of its regression
- test suite https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/tests
-
-
-
-Tutorial
---------
-
-Launch.feature
-##############
-
-.. include:: ../features/Tutorial/Launch.feature.md
- :code: markdown
-
-Step definition
----------------
-
-Pytest fixtures substitution.feature
-####################################
-
-.. include:: ../features/Step definition/Pytest fixtures substitution.feature.md
- :code: markdown
-
-Target fixtures specification.feature
-#####################################
-
-.. include:: ../features/Step definition/Target fixtures specification.feature.md
- :code: markdown
-
-Parameters
-##########
-
-Conversion.feature
-!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Step definition/Parameters/Conversion.feature.md
- :code: markdown
-
-Defaults.feature
-!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Step definition/Parameters/Defaults.feature.md
- :code: markdown
-
-Injection as fixtures.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Step definition/Parameters/Injection as fixtures.feature.md
- :code: markdown
-
-Parsing by custom parser.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Step definition/Parameters/Parsing by custom parser.feature.md
- :code: markdown
-
-Parsing.feature
-!!!!!!!!!!!!!!!
-
-.. include:: ../features/Step definition/Parameters/Parsing.feature.md
- :code: markdown
-
-Step
-----
-
-Data table.feature
-##################
-
-.. include:: ../features/Step/Data table.feature.md
- :code: markdown
-
-Doc string.feature
-##################
-
-.. include:: ../features/Step/Doc string.feature.md
- :code: markdown
-
-Step definition bounding.feature
-################################
-
-.. include:: ../features/Step/Step definition bounding.feature.md
- :code: markdown
-
-Scenario
---------
-
-Description.feature
-###################
-
-.. include:: ../features/Scenario/Description.feature.md
- :code: markdown
-
-Tag.feature
-###########
-
-.. include:: ../features/Scenario/Tag.feature.md
- :code: markdown
-
-Outline
-#######
-
-Examples Tag.feature
-!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Scenario/Outline/Examples Tag.feature.md
- :code: markdown
-
-Report
-------
-
-Gathering.feature
-#################
-
-.. include:: ../features/Report/Gathering.feature.md
- :code: markdown
-
-Feature
--------
-
-Description.feature
-###################
-
-.. include:: ../features/Feature/Description.feature.md
- :code: markdown
-
-Localization.feature
-####################
-
-.. include:: ../features/Feature/Localization.feature.md
- :code: markdown
-
-Tag conversion.feature
-######################
-
-.. include:: ../features/Feature/Tag conversion.feature.md
- :code: markdown
-
-Tag.feature
-###########
-
-.. include:: ../features/Feature/Tag.feature.md
- :code: markdown
-
-Load
-####
-
-Autoload.feature
-!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Feature/Load/Autoload.feature.md
- :code: markdown
-
-Scenario function loader.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Feature/Load/Scenario function loader.feature.md
- :code: markdown
-
-Scenario search from base directory.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Feature/Load/Scenario search from base directory.feature.md
- :code: markdown
-
-Scenario search from base url.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. include:: ../features/Feature/Load/Scenario search from base url.feature.md
- :code: markdown
diff --git a/docs/features/Feature/Description.feature.html b/docs/features/Feature/Description.feature.html
deleted file mode 100644
index 1951c6c5..00000000
--- a/docs/features/Feature/Description.feature.html
+++ /dev/null
@@ -1,44 +0,0 @@
-[HTML-rendered copy of "Feature: Descriptions"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Description.feature.rst b/docs/features/Feature/Description.feature.rst
new file mode 100644
index 00000000..8f316fa5
--- /dev/null
+++ b/docs/features/Feature/Description.feature.rst
@@ -0,0 +1,40 @@
+Feature: Descriptions
+^^^^^^^^^^^^^^^^^^^^^
+
+Free-form descriptions can be placed underneath Feature,
+Example/Scenario, Background, Scenario Outline and Rule. You can write
+anything you like, as long as no line starts with a keyword.
+Descriptions can be in the form of Markdown - formatters including the
+official HTML formatter support this.
+
+Scenario:
+'''''''''
+
+- Given File "Description.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ My Feature description
+ Scenario:
+ Given I check feature description
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given('I check feature description')
+ def step(feature):
+ assert feature.description == "My Feature description"
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Feature/Load/Autoload.feature.html b/docs/features/Feature/Load/Autoload.feature.html
deleted file mode 100644
index 8cd02236..00000000
--- a/docs/features/Feature/Load/Autoload.feature.html
+++ /dev/null
@@ -1,113 +0,0 @@
-[HTML-rendered copy of "Feature: Gherkin features autoload"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Load/Autoload.feature.rst b/docs/features/Feature/Load/Autoload.feature.rst
new file mode 100644
index 00000000..c231dc7c
--- /dev/null
+++ b/docs/features/Feature/Load/Autoload.feature.rst
@@ -0,0 +1,96 @@
+Feature: Gherkin features autoload
+''''''''''''''''''''''''''''''''''
+
+By default, Gherkin features are autoloaded and treated as usual pytest
+tests if they are placed in the tests hierarchy proposed by pytest. This
+behavior can be disabled.
+
+Rule: Feature autoload
+
+
+Background:
+
+
+- Given File "Passing.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Passing feature
+ Scenario: Passing scenario
+ * Passing step
+
+- Given File "Another.passing.feature.md" with content:
+
+ .. code:: markdown
+
+ # Feature: Passing feature
+ ## Scenario: Passing scenario
+ * Given Passing step
+
+- Given Install npm packages
+
+ ======== =================
+ packages @cucumber/gherkin
+ ======== =================
+ ======== =================
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import step
+
+ @step('Passing step')
+
+ def _():
+ ...
+
+Scenario: Feature is loaded by default
+
+
+- When run pytest
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 2 |
+ +--------+
+
+Scenario: Feature autoload could be disabled via command line
+
+
+- When run pytest
+
+ ======== ==========================
+ cli_args --disable-feature-autoload
+ ======== ==========================
+ ======== ==========================
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 0 |
+ +--------+
+
+Scenario: Feature autoload could be disabled via pytest.ini
+
+
+- Given Set pytest.ini content to:
+
+ .. code:: ini
+
+ [pytest]
+ disable_feature_autoload=true
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 0 |
+ +--------+
diff --git a/docs/features/Feature/Load/Scenario function loader.feature.html b/docs/features/Feature/Load/Scenario function loader.feature.html
deleted file mode 100644
index 648a4e95..00000000
--- a/docs/features/Feature/Load/Scenario function loader.feature.html
+++ /dev/null
@@ -1,109 +0,0 @@
-[HTML-rendered copy of "Feature: Gherkin features load by scenario and scenarios functions"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Load/Scenario function loader.feature.rst b/docs/features/Feature/Load/Scenario function loader.feature.rst
new file mode 100644
index 00000000..268df29b
--- /dev/null
+++ b/docs/features/Feature/Load/Scenario function loader.feature.rst
@@ -0,0 +1,107 @@
+Feature: Gherkin features load by scenario and scenarios functions
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Background:
+
+
+- Given File "Passing.feature" in the temporary path with content:
+
+ .. code:: gherkin
+
+ Feature: Passing feature
+ Scenario: Passing scenario
+ Given Passing step
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import step
+
+ @step('Passing step')
+ def _():
+ ...
+
+Scenario: "scenario" function is used as decorator
+
+
+- And File "test_scenario_load.py" with fixture templated content:
+
+ .. code:: python
+
+ from pytest_bdd import scenario
+ from pathlib import Path
+
+ @scenario(Path(r"{tmp_path}") / "Passing.feature")
+ def test_passing_feature():
+ # It is however encouraged to try as much as possible to have your logic only inside the Given, When, Then steps.
+ ...
+
+Scenario: "scenarios" function is used as decorator
+
+
+- And File "test_scenario_load.py" with fixture templated content:
+
+ .. code:: python
+
+ from pytest_bdd import scenarios
+ from pathlib import Path
+
+ @scenarios(Path(r"{tmp_path}") / "Passing.feature", return_test_decorator=True)
+ def test_passing_feature():
+ # It is however encouraged to try as much as possible to have your logic only inside the Given, When, Then steps.
+ ...
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: "scenario" function is used to register feature as test
+
+
+- And File "test_scenario_load.py" with fixture templated content:
+
+ .. code:: python
+
+ from pytest_bdd import scenario
+ from pathlib import Path
+
+ test_passing_feature = scenario(Path(r"{tmp_path}") / "Passing.feature", return_test_decorator=False)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: "scenarios" function is used to register feature as test
+
+
+- And File "test_scenario_load.py" with fixture templated content:
+
+ .. code:: python
+
+ from pytest_bdd import scenarios
+ from pathlib import Path
+
+ test_passing_feature = scenarios(Path(r"{tmp_path}") / "Passing.feature")
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Feature/Load/Scenario search from base directory.feature.html b/docs/features/Feature/Load/Scenario search from base directory.feature.html
deleted file mode 100644
index 21b763c8..00000000
--- a/docs/features/Feature/Load/Scenario search from base directory.feature.html
+++ /dev/null
@@ -1,62 +0,0 @@
-[HTML-rendered copy of "Feature: Feature files search is started from base directory"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Load/Scenario search from base directory.feature.rst b/docs/features/Feature/Load/Scenario search from base directory.feature.rst
new file mode 100644
index 00000000..b1777a44
--- /dev/null
+++ b/docs/features/Feature/Load/Scenario search from base directory.feature.rst
@@ -0,0 +1,66 @@
+Feature: Feature files search is started from base directory
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+By default, pytest-bdd-ng will use the current module's path as the base
+path for finding feature files, but this behaviour can be changed in the
+pytest configuration file (i.e. ``pytest.ini``, ``tox.ini`` or
+``setup.cfg``) by declaring the new base path in the
+``bdd_features_base_dir`` key. The path is interpreted as relative to
+the pytest root directory. You can also override the features base path
+on a per-scenario basis, in order to override the path for specific tests.
+
+Background:
+
+
+- Given File "Passing.feature" in the temporary path with content:
+
+ .. code:: gherkin
+
+ Feature: Passing feature
+ Scenario: Passing scenario
+ Given Passing step
+ Scenario: Failing scenario
+ Given Failing step
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import step
+ from pytest_bdd.compatibility.pytest import fail
+
+ @step('Passing step')
+ def _():
+ ...
+
+ @step('Failing step')
+ def _():
+ fail('Intentional')
+
+- And File "test_feature.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import scenarios
+
+ test = scenarios('Passing.feature')
+
+Scenario:
+
+
+- Given File "pytest.ini" with fixture templated content:
+
+ .. code:: ini
+
+ [pytest]
+ bdd_features_base_dir={tmp_path}
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
diff --git a/docs/features/Feature/Load/Scenario search from base url.feature.html b/docs/features/Feature/Load/Scenario search from base url.feature.html
deleted file mode 100644
index c05d13bd..00000000
--- a/docs/features/Feature/Load/Scenario search from base url.feature.html
+++ /dev/null
@@ -1,62 +0,0 @@
-[HTML-rendered copy of "Feature files search is started from base directory" (base-URL variant); superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Load/Scenario search from base url.feature.rst b/docs/features/Feature/Load/Scenario search from base url.feature.rst
new file mode 100644
index 00000000..5ad809ac
--- /dev/null
+++ b/docs/features/Feature/Load/Scenario search from base url.feature.rst
@@ -0,0 +1,67 @@
+Feature files search is started from base directory
+'''''''''''''''''''''''''''''''''''''''''''''''''''
+
+By default, pytest-bdd-ng will use the current module's path as the base
+path for finding feature files, but this behaviour can be changed in the
+pytest configuration file (i.e. ``pytest.ini``, ``tox.ini`` or
+``setup.cfg``) by declaring the new base path in the
+``bdd_features_base_dir`` key. The path is interpreted as relative to
+the pytest root directory. You can also override the features base path
+on a per-scenario basis, in order to override the path for specific tests.
+
+Background:
+
+
+- Given Localserver endpoint "/features/Passing.feature" responding
+ content:
+
+ .. code:: gherkin
+
+ Feature: Passing feature
+ Scenario: Passing scenario
+ Given Passing step
+ Scenario: Failing scenario
+ Given Failing step
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import step
+ from pytest_bdd.compatibility.pytest import fail
+
+ @step('Passing step')
+ def _():
+ ...
+
+ @step('Failing step')
+ def _():
+ fail('Intentional')
+
+- And File "test_feature.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import scenarios,FeaturePathType
+
+ test = scenarios('Passing.feature', features_path_type=FeaturePathType.URL)
+
+Scenario:
+
+
+- Given File "pytest.ini" with fixture templated content:
+
+ .. code:: ini
+
+ [pytest]
+ bdd_features_base_url=http://localhost:{httpserver_port}/features
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
diff --git a/docs/features/Feature/Localization.feature.html b/docs/features/Feature/Localization.feature.html
deleted file mode 100644
index 4ef1cf53..00000000
--- a/docs/features/Feature/Localization.feature.html
+++ /dev/null
@@ -1,49 +0,0 @@
-[HTML-rendered copy of "Scenarios tags could be localized"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Localization.feature.rst b/docs/features/Feature/Localization.feature.rst
new file mode 100644
index 00000000..8f2630e5
--- /dev/null
+++ b/docs/features/Feature/Localization.feature.rst
@@ -0,0 +1,48 @@
+Scenarios tags could be localized
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+pytest-bdd-ng supports all localizations which Gherkin does:
+https://cucumber.io/docs/gherkin/languages/
+
+Scenario:
+'''''''''
+
+- Given File "Localized.feature" with content:
+
+ .. code:: gherkin
+
+ #language: pt
+ #encoding: UTF-8
+ Funcionalidade: Login no Programa
+     Cenário: O usuário ainda não é cadastrado
+       Dado que o usuário esteja na tela de login
+       Quando ele clicar no botão de Criar Conta
+ Então ele deve ser levado para a tela de criação de conta
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given, when, then
+
+   @given("que o usuário esteja na tela de login")
+ def tela_login():
+ assert True
+
+   @when("ele clicar no botão de Criar Conta")
+ def evento_criar_conta():
+ assert True
+
+ @then("ele deve ser levado para a tela de criação de conta")
+ def tela_criacao_conta():
+ assert True
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Feature/Tag conversion.feature.html b/docs/features/Feature/Tag conversion.feature.html
deleted file mode 100644
index bec533b7..00000000
--- a/docs/features/Feature/Tag conversion.feature.html
+++ /dev/null
@@ -1,52 +0,0 @@
-[HTML-rendered copy of "Feature: Scenarios tags could be converted via hooks"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Tag conversion.feature.rst b/docs/features/Feature/Tag conversion.feature.rst
new file mode 100644
index 00000000..1ea51329
--- /dev/null
+++ b/docs/features/Feature/Tag conversion.feature.rst
@@ -0,0 +1,48 @@
+Feature: Scenarios tags could be converted via hooks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Scenario:
+'''''''''
+
+- Given File "Passed.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ @todo
+ Scenario: Failed
+ Given I produce failed test
+
+ Scenario: Passed
+ Given I produce passed test
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given
+ from pytest_bdd.compatibility.pytest import fail
+
+ def pytest_bdd_convert_tag_to_marks(feature, scenario, tag):
+ if tag == 'todo':
+ marker = pytest.mark.skip(reason="Not implemented yet")
+ return [marker]
+
+ @given('I produce passed test')
+ def passing_step():
+ ...
+
+ @given('I produce failed test')
+ def failing_step():
+ fail('Enforce fail')
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ====== =======
+ passed failed skipped
+ ====== ====== =======
+ 1 0 1
+ ====== ====== =======
diff --git a/docs/features/Feature/Tag.feature.html b/docs/features/Feature/Tag.feature.html
deleted file mode 100644
index 1ae4049f..00000000
--- a/docs/features/Feature/Tag.feature.html
+++ /dev/null
@@ -1,236 +0,0 @@
-[HTML-rendered copy of "Feature: Features could be tagged"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Feature/Tag.feature.rst b/docs/features/Feature/Tag.feature.rst
new file mode 100644
index 00000000..67b43893
--- /dev/null
+++ b/docs/features/Feature/Tag.feature.rst
@@ -0,0 +1,172 @@
+Feature: Features could be tagged
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For picking up tests to run we can use the
+`tests selection <http://pytest.org/latest/usage.html#specifying-tests-selecting-tests>`_
+technique. The problem is that you have to know how your tests are
+organized; knowing only the feature files organization is not enough.
+`Cucumber tags <https://github.com/cucumber/cucumber/wiki/Tags>`_
+introduce a standard way of categorizing your features and scenarios.
+
+Rule:
+'''''
+
+Background:
+
+
+- Given File "Passed.feature" with content:
+
+ .. code:: gherkin
+
+ @passed
+ Feature: Steps are executed by corresponding step keyword decorator
+ Scenario: Passed
+ Given I produce passed test
+
+- Given File "Failed.feature" with content:
+
+ .. code:: gherkin
+
+ @failed
+ Feature: Steps are executed by corresponding step keyword decorator
+ Scenario: Failed
+ Given I produce failed test
+
+- Given File "Both.feature" with content:
+
+ .. code:: gherkin
+
+ @both
+ Feature: Steps are executed by corresponding step keyword decorator
+ Scenario: Passed
+ Given I produce passed test
+
+ Scenario: Failed
+ Given I produce failed test
+
+- Given File "pytest.ini" with content:
+
+ .. code:: ini
+
+ [pytest]
+ markers =
+ passed
+ failed
+ both
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd.compatibility.pytest import fail
+ from pytest_bdd import given
+
+ @given('I produce passed test')
+ def passing_step():
+ ...
+
+ @given('I produce failed test')
+ def failing_step():
+ fail('Enforce fail')
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m passed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m failed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 0 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ================
+ cli_args -m passed or failed
+ ======== == ================
+ ======== == ================
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ========
+ cli_args -m not both
+ ======== == ========
+ ======== == ========
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ====
+ cli_args -m both
+ ======== == ====
+ ======== == ====
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 2 2
+ ====== ======
diff --git a/docs/features/Report/Gathering.feature.html b/docs/features/Report/Gathering.feature.html
deleted file mode 100644
index d85fb94b..00000000
--- a/docs/features/Report/Gathering.feature.html
+++ /dev/null
@@ -1,87 +0,0 @@
-[HTML-rendered copy of the report-gathering feature; superseded by the reStructuredText page added below]
diff --git a/docs/features/Report/Gathering.feature.rst b/docs/features/Report/Gathering.feature.rst
new file mode 100644
index 00000000..8f37407c
--- /dev/null
+++ b/docs/features/Report/Gathering.feature.rst
@@ -0,0 +1,67 @@
+Feature:
+^^^^^^^^
+
+Background:
+'''''''''''
+
+- Given File "Passing.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Passing feature
+ Scenario: Passing scenario
+ Given Passing step
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import step
+
+ @step('Passing step')
+ def _():
+ ...
+
+Scenario: NDJson(JSONL) could be produced on the feature run
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+::
+
+ Output file could be fed into other @cucumber tools for more verbose report
+ [Messages](https://github.com/cucumber/messages)
+
+- When run pytest
+
+ ========== ================= ==========
+ cli_args --messages-ndjson out.ndjson
+ ========== ================= ==========
+ subprocess true
+ ========== ================= ==========
+
+- Then File "out.ndjson" has "15" lines
+
+- Then Report "out.ndjson" parsable into messages
+
+Scenario: HTML report could be produced on the feature run
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+::
+
+ Dummy reporter based on [@cucumber/html-formatter](https://github.com/cucumber/html-formatter)
+
+- Given Install npm packages
+
+ ======== ========================
+ packages @cucumber/html-formatter
+ ======== ========================
+ ======== ========================
+
+- When run pytest
+
+ ========== =============== ========
+ cli_args --cucumber-html out.html
+ ========== =============== ========
+ subprocess true
+ ========== =============== ========
+
+- Then File "out.html" is not empty
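
For context on the ``--messages-ndjson`` report checked above: NDJSON is newline-delimited JSON, one Cucumber message envelope per line, so the file can be inspected with the standard library alone. A minimal, illustrative sketch (only the ``out.ndjson`` name comes from the scenario above; everything else is assumed):

.. code:: python

   import json

   # Each non-empty line of the NDJSON report is a standalone JSON envelope.
   with open("out.ndjson", encoding="utf-8") as report:
       envelopes = [json.loads(line) for line in report if line.strip()]

   print(f"{len(envelopes)} message envelopes in the report")
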
diff --git a/docs/features/Scenario/Description.feature.html b/docs/features/Scenario/Description.feature.html
deleted file mode 100644
index a762a550..00000000
--- a/docs/features/Scenario/Description.feature.html
+++ /dev/null
@@ -1,42 +0,0 @@
-[HTML-rendered copy of "Feature: Descriptions" (scenario descriptions); superseded by the reStructuredText page added below]
diff --git a/docs/features/Scenario/Description.feature.rst b/docs/features/Scenario/Description.feature.rst
new file mode 100644
index 00000000..d05a37d0
--- /dev/null
+++ b/docs/features/Scenario/Description.feature.rst
@@ -0,0 +1,41 @@
+Feature: Descriptions
+^^^^^^^^^^^^^^^^^^^^^
+
+Free-form descriptions can be placed underneath Feature,
+Example/Scenario, Background, Scenario Outline and Rule. You can write
+anything you like, as long as no line starts with a keyword.
+Descriptions can be in the form of Markdown - formatters including the
+official HTML formatter support this.
+
+Scenario:
+'''''''''
+
+- Given File "Description.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ My Scenario description
+
+ Given I check scenario description
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given('I check scenario description')
+ def step(scenario):
+ assert "My Scenario description" in scenario.description
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Scenario/Outline/Examples Tag.feature.html b/docs/features/Scenario/Outline/Examples Tag.feature.html
deleted file mode 100644
index 0f868bc6..00000000
--- a/docs/features/Scenario/Outline/Examples Tag.feature.html
+++ /dev/null
@@ -1,501 +0,0 @@
-[HTML-rendered copy of "Feature: Scenario Outline examples could be tagged"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Scenario/Outline/Examples Tag.feature.rst b/docs/features/Scenario/Outline/Examples Tag.feature.rst
new file mode 100644
index 00000000..68cc42b7
--- /dev/null
+++ b/docs/features/Scenario/Outline/Examples Tag.feature.rst
@@ -0,0 +1,348 @@
+Feature: Scenario Outline examples could be tagged
+''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Rule:
+
+
+Background:
+
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Steps are executed by corresponding step keyword decorator
+
+ Scenario Outline:
+        Given I produce <outcome> test
+
+ @passed
+ Examples:
+ |outcome|
+ |passed |
+
+ @failed
+ Examples:
+ |outcome|
+ |failed |
+
+ @both
+ Examples:
+ |outcome|
+ |passed |
+ |failed |
+
+- Given File "pytest.ini" with content:
+
+ .. code:: ini
+
+ [pytest]
+ markers =
+ passed
+ failed
+ both
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd.compatibility.pytest import fail
+ from pytest_bdd import given
+
+ @given('I produce passed test')
+ def passing_step():
+ ...
+
+ @given('I produce failed test')
+ def failing_step():
+ fail('Enforce fail')
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m passed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m failed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 0 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ================
+ cli_args -m passed or failed
+ ======== == ================
+ ======== == ================
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ========
+ cli_args -m not both
+ ======== == ========
+ ======== == ========
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+
+ ======== == ====
+ cli_args -m both
+ ======== == ====
+ ======== == ====
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+
+
+- When run pytest
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 2 2
+ ====== ======
+
+Rule: Mixing tags on feature & examples level
+
+
+Background:
+
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ @feature_tag
+ Feature: Steps are executed by corresponding step keyword decorator
+ Scenario Outline:
+        Given I produce <outcome> test
+
+ Examples:
+ |outcome|
+ |passed |
+
+ @examples_tag
+ Examples:
+ |outcome|
+ |failed |
+
+- Given File "pytest.ini" with content:
+
+ .. code:: ini
+
+ [pytest]
+ markers =
+ feature_tag
+ examples_tag
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd.compatibility.pytest import fail
+ from pytest_bdd import given
+
+ @given('I produce passed test')
+ def passing_step():
+ ...
+
+ @given('I produce failed test')
+ def failing_step():
+ fail('Enforce fail')
+
+Example:
+
+
+- When run pytest
+
+ ======== == ===========
+ cli_args -m feature_tag
+ ======== == ===========
+ ======== == ===========
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Example:
+
+
+- When run pytest
+
+ ======== == ============
+ cli_args -m examples_tag
+ ======== == ============
+ ======== == ============
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 0 1
+ ====== ======
+
+Example:
+
+
+- When run pytest
+
+ ======== == ===============
+ cli_args -m not feature_tag
+ ======== == ===============
+ ======== == ===============
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 0 0
+ ====== ======
+
+Example:
+
+
+- When run pytest
+
+ ======== == ================
+ cli_args -m not examples_tag
+ ======== == ================
+ ======== == ================
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
+
+Example:
+
+
+- When run pytest
+
+ ======== == =========== ==============
+ cli_args -m feature_tag --collect-only
+ ======== == =========== ==============
+ ======== == =========== ==============
+
+- Then pytest outcome must match lines:
+
+ +-------------------+
+ | collected 2 items |
+ +===================+
+ +-------------------+
+
+Example:
+
+
+- When run pytest
+
+ ======== == ============ ==============
+ cli_args -m examples_tag --collect-only
+ ======== == ============ ==============
+ ======== == ============ ==============
+
+- Then pytest outcome must match lines:
+
+ +-----------------------------------------------+
+ | collected 2 items / 1 deselected / 1 selected |
+ +===============================================+
+ +-----------------------------------------------+
+
+Example:
+
+
+- When run pytest
+
+ ======== == =============== ==============
+ cli_args -m not feature_tag --collect-only
+ ======== == =============== ==============
+ ======== == =============== ==============
+
+- Then pytest outcome must match lines:
+
+ +------------------------------------+
+ | collected 2 items / 2 deselected\* |
+ +====================================+
+ +------------------------------------+
+
+Example:
+
+
+- When run pytest
+
+ ======== == ================ ==============
+ cli_args -m not examples_tag --collect-only
+ ======== == ================ ==============
+ ======== == ================ ==============
+
+- Then pytest outcome must match lines:
+
+ +-----------------------------------------------+
+ | collected 2 items / 1 deselected / 1 selected |
+ +===============================================+
+ +-----------------------------------------------+
diff --git a/docs/features/Scenario/Tag.feature.html b/docs/features/Scenario/Tag.feature.html
deleted file mode 100644
index de5264b2..00000000
--- a/docs/features/Scenario/Tag.feature.html
+++ /dev/null
@@ -1,224 +0,0 @@
-[HTML-rendered copy of "Feature: Scenarios could be tagged"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Scenario/Tag.feature.rst b/docs/features/Scenario/Tag.feature.rst
new file mode 100644
index 00000000..1897aed1
--- /dev/null
+++ b/docs/features/Scenario/Tag.feature.rst
@@ -0,0 +1,153 @@
+Feature: Scenarios could be tagged
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Background:
+'''''''''''
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Steps are executed by corresponding step keyword decorator
+ @passed
+ Scenario: Passed
+ Given I produce passed test
+
+ @failed
+ Scenario: Failed
+ Given I produce failed test
+
+ @both
+ Rule:
+ Scenario: Passed
+ Given I produce passed test
+
+ Scenario: Failed
+ Given I produce failed test
+
+- Given File "pytest.ini" with content:
+
+ .. code:: ini
+
+ [pytest]
+ markers =
+ passed
+ failed
+ both
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd.compatibility.pytest import fail
+ from pytest_bdd import given
+
+ @given('I produce passed test')
+ def passing_step():
+ ...
+
+ @given('I produce failed test')
+ def failing_step():
+ fail('Enforce fail')
+
+Scenario:
+'''''''''
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m passed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
+
+Scenario:
+'''''''''
+
+- When run pytest
+
+ ======== == ======
+ cli_args -m failed
+ ======== == ======
+ ======== == ======
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 0 1
+ ====== ======
+
+Scenario:
+'''''''''
+
+- When run pytest
+
+ ======== == ================
+ cli_args -m passed or failed
+ ======== == ================
+ ======== == ================
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+'''''''''
+
+- When run pytest
+
+ ======== == ========
+ cli_args -m not both
+ ======== == ========
+ ======== == ========
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+'''''''''
+
+- When run pytest
+
+ ======== == ====
+ cli_args -m both
+ ======== == ====
+ ======== == ====
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 1
+ ====== ======
+
+Scenario:
+'''''''''
+
+- When run pytest
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 2 2
+ ====== ======
diff --git a/docs/features/Step definition/Parameters/Conversion.feature.html b/docs/features/Step definition/Parameters/Conversion.feature.html
deleted file mode 100644
index db062cf3..00000000
--- a/docs/features/Step definition/Parameters/Conversion.feature.html
+++ /dev/null
@@ -1,124 +0,0 @@
-[HTML-rendered copy of "Feature: Step definitions parameters conversion"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Step definition/Parameters/Conversion.feature.rst b/docs/features/Step definition/Parameters/Conversion.feature.rst
new file mode 100644
index 00000000..c52c653f
--- /dev/null
+++ b/docs/features/Step definition/Parameters/Conversion.feature.rst
@@ -0,0 +1,123 @@
+Feature: Step definitions parameters conversion
+'''''''''''''''''''''''''''''''''''''''''''''''
+
+There is a possibility to pass argument converters, which may be useful
+if you need to postprocess step arguments after the parser.
+
+Background:
+
+
+- Given File "Example.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have a cucumber
+
+Scenario: for non-anonymous groups
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from enum import Enum
+ from pytest_bdd import given
+ from re import compile as parse
+
+ class Item(Enum):
+ CUCUMBER = 'cucumber'
+
+   @given(parse(r"I have a (?P<item>\w+)"), converters=dict(item=Item))
+ def i_have_item(item):
+ assert item == Item.CUCUMBER
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Rule: for anonymous groups
+
+
+::
+
+   Step definition parameters might not have a name, so
+   we have to name them before conversion
+
+Scenario:
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from enum import Enum
+ from pytest_bdd import given
+ from re import compile as parse
+
+ class Item(Enum):
+ CUCUMBER = 'cucumber'
+
+ @given(
+ parse(r"I have a (\w+)"),
+ anonymous_group_names=('item',),
+ converters=dict(item=Item)
+ )
+ def i_have_item(item):
+ assert item == Item.CUCUMBER
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario:
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from enum import Enum
+ from pytest_bdd import given
+ from functools import partial
+ from cucumber_expressions.expression import CucumberExpression
+ from cucumber_expressions.parameter_type_registry import ParameterTypeRegistry
+
+ parse = partial(
+ CucumberExpression,
+ parameter_type_registry = ParameterTypeRegistry()
+ )
+
+ class Item(Enum):
+ CUCUMBER = 'cucumber'
+
+ @given(
+ parse(r"I have a {word}"),
+ anonymous_group_names=('item',),
+ converters=dict(item=Item)
+ )
+ def i_have_item(item):
+ assert item == Item.CUCUMBER
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Parameters/Defaults.feature.html b/docs/features/Step definition/Parameters/Defaults.feature.html
deleted file mode 100644
index bcfb05e4..00000000
--- a/docs/features/Step definition/Parameters/Defaults.feature.html
+++ /dev/null
@@ -1,53 +0,0 @@
-[HTML-rendered copy of "Feature: Step definitions parameters could have default values"; superseded by the reStructuredText page added below]
diff --git a/docs/features/Step definition/Parameters/Defaults.feature.rst b/docs/features/Step definition/Parameters/Defaults.feature.rst
new file mode 100644
index 00000000..31a23c1d
--- /dev/null
+++ b/docs/features/Step definition/Parameters/Defaults.feature.rst
@@ -0,0 +1,53 @@
+Feature: Step definitions parameters could have default values
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Scenario:
+
+
+- Given File "Example.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have a cucumber
+ Given I have a rotten cucumber
+ Given I have a fresh cucumber
+ Given I have a pickle
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from enum import Enum
+ from re import compile as parse
+ from pytest import fixture
+ from pytest_bdd import given
+
+ class Freshness(Enum):
+ FRESH = 'fresh'
+ ROTTEN = 'rotten'
+ SALTED = 'salted'
+
+ @fixture
+ def oracle_freshness():
+ return [Freshness.FRESH, Freshness.ROTTEN, Freshness.FRESH, Freshness.SALTED]
+
+ @given("I have a pickle", param_defaults=dict(freshness=Freshness.SALTED))
+ @given(
+       parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
+ converters=dict(freshness=Freshness),
+ param_defaults=dict(freshness=Freshness.FRESH)
+ )
+ def i_have_cucumber(freshness, oracle_freshness):
+ assert freshness == oracle_freshness.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Parameters/Injection as fixtures.feature.html b/docs/features/Step definition/Parameters/Injection as fixtures.feature.html
deleted file mode 100644
index 91347933..00000000
--- a/docs/features/Step definition/Parameters/Injection as fixtures.feature.html
+++ /dev/null
@@ -1,300 +0,0 @@
-Feature: Step definitions parameters injection as fixtures
-
-  Step arguments are injected into step context and could be used as normal
-  fixtures with the names equal to the names of the arguments by default.
-  Step's argument are accessible as a fixture in other step function just
-  by mentioning it as an argument.
-  If the name of the step argument clashes with existing fixture,
-  it will be overridden by step's argument value.
-  Value for some fixture deeply inside of the fixture tree could be set/override
-  in a ad-hoc way by just choosing the proper name for the step argument.
-
-  Scenario: Step parameters are injected as fixtures by default
-
-    Given File "conftest.py" with content:
-      from re import compile as parse
-      from pytest_bdd import given, then
-
-      @given("I have a pickle", param_defaults=dict(freshness='salted'))
-      @given(
-          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
-          param_defaults=dict(freshness='fresh')
-      )
-      def i_have_cucumber(freshness):
-          ...
-
-      @then("Taste of cucumber is salt")
-      def i_check_salted_cucumber(freshness):
-          assert freshness=='salted'
-
-    Given File "Freshness.feature" with content:
-      Feature:
-        Scenario:
-          Given I have a salted cucumber
-          Then Taste of cucumber is salt
-
-    Given File "test_freshness.py" with content:
-      from enum import Enum
-      from pytest import fixture
-      from pytest_bdd import scenario
-
-      class Freshness(Enum):
-          FRESH = 'fresh'
-          ROTTEN = 'rotten'
-          SALTED = 'salted'
-
-      @fixture
-      def oracle_freshness():
-          return Freshness.SALTED
-
-      @scenario("Freshness.feature")
-      def test_passing_feature(request, oracle_freshness):
-          assert Freshness(request.getfixturevalue('freshness'))==oracle_freshness
-
-      @scenario("Freshness.feature")
-      def test_another_passing_feature(freshness, oracle_freshness):
-          assert Freshness(freshness)==oracle_freshness
-
-    When run pytest
-      cli_args | --disable-feature-autoload
-
-    Then pytest outcome must contain tests with statuses:
-      passed | 2
-
-  Scenario: Step parameters injection as fixtures could be disabled
-
-    Given File "conftest.py" with content:
-      from re import compile as parse
-      from pytest_bdd import given, then
-
-      @given(
-          "I have a pickle",
-          param_defaults=dict(freshness='salted'),
-          params_fixtures_mapping={...:None},
-          target_fixtures=['cuke_taste']
-      )
-      @given(
-          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
-          param_defaults=dict(freshness='fresh'),
-          params_fixtures_mapping=False,
-          target_fixture='cuke_taste'
-      )
-      def i_have_cucumber(freshness):
-          assert freshness is not None
-          yield freshness
-
-      @then("Taste of cucumber is salt")
-      def i_check_salted_cucumber(cuke_taste):
-          assert cuke_taste=='salted'
-
-    Given File "Freshness.feature" with content:
-      Feature:
-        Scenario:
-          Given I have a pickle
-          Then Taste of cucumber is salt
-
-    Given File "test_freshness.py" with content:
-      import pytest
-      from pytest_bdd import scenario
-      from pytest_bdd.compatibility.pytest import FixtureLookupError
-
-      @scenario("Freshness.feature")
-      def test_passing_feature(request, cuke_taste):
-          assert cuke_taste == 'salted'
-          with pytest.raises(FixtureLookupError):
-              request.getfixturevalue('freshness')
-
-    When run pytest
-      cli_args | --disable-feature-autoload
-
-    Then pytest outcome must contain tests with statuses:
-      passed | 1
-
-  Scenario: Step parameters renaming on injection as fixtures
-
-    Given File "conftest.py" with content:
-      from re import compile as parse
-      from pytest_bdd import given, then
-
-      @given(
-          "I have a pickle",
-          param_defaults=dict(freshness='salted'),
-          params_fixtures_mapping={"freshness":"cuke_taste"}
-      )
-      @given(
-          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
-          param_defaults=dict(freshness='fresh'),
-          params_fixtures_mapping={"freshness":"cuke_taste"}
-      )
-      def i_have_cucumber(cuke_taste, freshness):
-          assert cuke_taste is not None
-          assert freshness == cuke_taste
-          yield cuke_taste
-
-      @then("Taste of cucumber is salt")
-      def i_check_salted_cucumber(cuke_taste):
-          assert cuke_taste=='salted'
-
-    Given File "Freshness.feature" with content:
-      Feature:
-        Scenario:
-          Given I have a pickle
-          Then Taste of cucumber is salt
-
-    Given File "test_freshness.py" with content:
-      import pytest
-      from pytest_bdd import scenario
-from pytest_bdd.compatibility.pytest import FixtureLookupError
-
-@scenario("Freshness.feature")
-def test_passing_feature(request, cuke_taste):
- assert cuke_taste == 'salted'
- with pytest.raises(FixtureLookupError):
- request.getfixturevalue('freshness')
-
-
--
-
When run pytest
-
-
-
-cli_args |
---disable-feature-autoload |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Scenario: Only allowed step parameters injection as fixtures
-
--
-
Given File "conftest.py" with content:
-from pytest_bdd import given
-
-@given(
- "I have an old pickle",
- param_defaults=dict(freshness='salted', age='old'),
- params_fixtures_mapping={"freshness"}
-)
-def i_have_cucumber(age, freshness):
- assert age == 'old'
- assert freshness == 'salted'
-
-
--
-
Given File "Freshness.feature" with content:
-Feature:
- Scenario:
- Given I have an old pickle
-
-
--
-
Given File "test_freshness.py" with content:
-import pytest
-from pytest_bdd import scenario
-from pytest_bdd.compatibility.pytest import FixtureLookupError
-
-@scenario("Freshness.feature")
-def test_passing_feature(request, freshness):
- assert freshness == 'salted'
- with pytest.raises(FixtureLookupError):
- request.getfixturevalue('age')
-
-
--
-
When run pytest
-
-
-
-cli_args |
---disable-feature-autoload |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
diff --git a/docs/features/Step definition/Parameters/Injection as fixtures.feature.rst b/docs/features/Step definition/Parameters/Injection as fixtures.feature.rst
new file mode 100644
index 00000000..c1d5478b
--- /dev/null
+++ b/docs/features/Step definition/Parameters/Injection as fixtures.feature.rst
@@ -0,0 +1,273 @@
+Feature: Step definitions parameters injection as fixtures
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Step arguments are injected into the step context and can be used as
+normal fixtures; by default the fixture names equal the argument names.
+
+A step's arguments are accessible as fixtures in other step functions
+simply by listing them as function arguments.
+
+If the name of a step argument clashes with an existing fixture, the
+fixture is overridden by the step argument's value. This way the value
+of a fixture deep inside the fixture tree can be set or overridden in an
+ad-hoc manner just by choosing the proper name for the step argument.
+
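+As a minimal sketch (with hypothetical step names, not taken from the
+scenarios below), a value parsed in one step can be read in a later step
+simply by naming it as an argument:
+
+.. code:: python
+
+   from re import compile as parse
+   from pytest_bdd import given, then
+
+   @given(parse(r"I have a (?P<color>\w+) cucumber"))
+   def i_have_cucumber(color):
+       ...  # "color" is parsed here and injected as a fixture
+
+   @then("Its color is green")
+   def check_color(color):
+       # the same "color" value is available here as a fixture
+       assert color == "green"
+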
+Scenario: Step parameters are injected as fixtures by default
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from re import compile as parse
+ from pytest_bdd import given, then
+
+ @given("I have a pickle", param_defaults=dict(freshness='salted'))
+ @given(
+          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
+ param_defaults=dict(freshness='fresh')
+ )
+ def i_have_cucumber(freshness):
+ ...
+
+ @then("Taste of cucumber is salt")
+ def i_check_salted_cucumber(freshness):
+ assert freshness=='salted'
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have a salted cucumber
+ Then Taste of cucumber is salt
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ from enum import Enum
+ from pytest import fixture
+ from pytest_bdd import scenario
+ class Freshness(Enum):
+ FRESH = 'fresh'
+ ROTTEN = 'rotten'
+ SALTED = 'salted'
+
+ @fixture
+ def oracle_freshness():
+ return Freshness.SALTED
+
+ @scenario("Freshness.feature")
+ def test_passing_feature(request, oracle_freshness):
+ assert Freshness(request.getfixturevalue('freshness'))==oracle_freshness
+
+ @scenario("Freshness.feature")
+ def test_another_passing_feature(freshness, oracle_freshness):
+ assert Freshness(freshness)==oracle_freshness
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 2 |
+ +--------+
+
+Scenario: Step parameters injection as fixtures could be disabled
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from re import compile as parse
+ from pytest_bdd import given, then
+
+ @given(
+ "I have a pickle",
+ param_defaults=dict(freshness='salted'),
+ params_fixtures_mapping={...:None},
+ target_fixtures=['cuke_taste']
+ )
+ @given(
+          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
+ param_defaults=dict(freshness='fresh'),
+ params_fixtures_mapping=False,
+ target_fixture='cuke_taste'
+ )
+ def i_have_cucumber(freshness):
+ assert freshness is not None
+ yield freshness
+
+ @then("Taste of cucumber is salt")
+ def i_check_salted_cucumber(cuke_taste):
+ assert cuke_taste=='salted'
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have a pickle
+ Then Taste of cucumber is salt
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import scenario
+ from pytest_bdd.compatibility.pytest import FixtureLookupError
+ @scenario("Freshness.feature")
+ def test_passing_feature(request, cuke_taste):
+ assert cuke_taste == 'salted'
+ with pytest.raises(FixtureLookupError):
+ request.getfixturevalue('freshness')
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: Step parameters renaming on injection as fixtures
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from re import compile as parse
+ from pytest_bdd import given, then
+
+ @given(
+ "I have a pickle",
+ param_defaults=dict(freshness='salted'),
+ params_fixtures_mapping={"freshness":"cuke_taste"}
+ )
+ @given(
+          parse(r"I have a ((?P<freshness>\w+)\s)?cucumber"),
+ param_defaults=dict(freshness='fresh'),
+ params_fixtures_mapping={"freshness":"cuke_taste"}
+ )
+ def i_have_cucumber(cuke_taste, freshness):
+ assert cuke_taste is not None
+ assert freshness == cuke_taste
+ yield cuke_taste
+
+ @then("Taste of cucumber is salt")
+ def i_check_salted_cucumber(cuke_taste):
+ assert cuke_taste=='salted'
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have a pickle
+ Then Taste of cucumber is salt
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import scenario
+ from pytest_bdd.compatibility.pytest import FixtureLookupError
+
+ @scenario("Freshness.feature")
+ def test_passing_feature(request, cuke_taste):
+ assert cuke_taste == 'salted'
+ with pytest.raises(FixtureLookupError):
+ request.getfixturevalue('freshness')
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: Only allowed step parameters injection as fixtures
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given(
+ "I have an old pickle",
+ param_defaults=dict(freshness='salted', age='old'),
+ params_fixtures_mapping={"freshness"}
+ )
+ def i_have_cucumber(age, freshness):
+ assert age == 'old'
+ assert freshness == 'salted'
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have an old pickle
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import scenario
+ from pytest_bdd.compatibility.pytest import FixtureLookupError
+
+ @scenario("Freshness.feature")
+ def test_passing_feature(request, freshness):
+ assert freshness == 'salted'
+ with pytest.raises(FixtureLookupError):
+ request.getfixturevalue('age')
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Parameters/Parsing by custom parser.feature.html b/docs/features/Step definition/Parameters/Parsing by custom parser.feature.html
deleted file mode 100644
index 16184e8a..00000000
--- a/docs/features/Step definition/Parameters/Parsing by custom parser.feature.html
+++ /dev/null
@@ -1,62 +0,0 @@
-Feature: Step definitions parameters parsing by custom parser
-Scenario:
-
-- Given File "Example.feature" with content:
-
Feature:
- Scenario:
- Given there are 10 cucumbers
-
-
-- Given File "conftest.py" with content:
-
import re
-from pytest_bdd import given, parsers
-
-class Parser(parsers.StepParser):
- def __init__(self, name, *args,**kwargs):
- self.name = name
- self.regex = re.compile(
- re.sub("%(.+)%", r"(?P<\1>.+)", name),
- *args,
- **kwargs
- )
-
- def parse_arguments(self, request, name, **kwargs):
- __doc__ = "Parse step arguments"
- return self.regex.match(name).groupdict()
-
- @property
- def arguments(self):
- return [*self.regex.groupindex.keys()]
-
- def is_matching(self, request ,name):
- __doc__ = "Match given name with the step name."
- return bool(self.regex.match(name))
-
- def __str__(self):
- return self.name
-
-@given(
- Parser("there are %start% cucumbers"),
- target_fixture="start_cucumbers",
- converters=dict(start=int)
-)
-def start_cucumbers(start):
- assert start == 10
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
diff --git a/docs/features/Step definition/Parameters/Parsing by custom parser.feature.rst b/docs/features/Step definition/Parameters/Parsing by custom parser.feature.rst
new file mode 100644
index 00000000..117731f3
--- /dev/null
+++ b/docs/features/Step definition/Parameters/Parsing by custom parser.feature.rst
@@ -0,0 +1,62 @@
+Feature: Step definitions parameters parsing by custom parser
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Scenario:
+
+
+- Given File "Example.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given there are 10 cucumbers
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ import re
+ from pytest_bdd import given, parsers
+
+ class Parser(parsers.StepParser):
+ def __init__(self, name, *args,**kwargs):
+ self.name = name
+ self.regex = re.compile(
+ re.sub("%(.+)%", r"(?P<\1>.+)", name),
+ *args,
+ **kwargs
+ )
+
+ def parse_arguments(self, request, name, **kwargs):
+ __doc__ = "Parse step arguments"
+ return self.regex.match(name).groupdict()
+
+ @property
+ def arguments(self):
+ return [*self.regex.groupindex.keys()]
+
+ def is_matching(self, request ,name):
+ __doc__ = "Match given name with the step name."
+ return bool(self.regex.match(name))
+
+ def __str__(self):
+ return self.name
+
+ @given(
+ Parser("there are %start% cucumbers"),
+ target_fixture="start_cucumbers",
+ converters=dict(start=int)
+ )
+ def start_cucumbers(start):
+ assert start == 10
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Parameters/Parsing.feature.html b/docs/features/Step definition/Parameters/Parsing.feature.html
deleted file mode 100644
index 54215f50..00000000
--- a/docs/features/Step definition/Parameters/Parsing.feature.html
+++ /dev/null
@@ -1,439 +0,0 @@
-Feature: Step definitions parameters parsing
-Step parameters often enable the reuse of steps,
-which can reduce the amount of code required.
-This methodology allows for the same step
-to be used multiple times within a single scenario,
-but with different arguments.
-There are an multiple step parameter parsers available for your use.
-Rule: Step definitions parameters parsing
-Background:
-
-Example: Heuristic parser guesses a type and builds particular parser to be applied
- Tries to select right parser between string, cucumber_expression, cfparse and re.
- Any object that supports `__str__` interface and does not support parser interface
- will be wrapped with this parser
-
-
-- Given File "conftest.py" with content:
-
import pytest
-from pytest_bdd import given, when, then
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-# string parser
-@given("I have a wallet", param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-# cucumber expressions parser
-@given("I have {int} Euro",
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-# parse parser
-@when(
- "I pay {} Euro",
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-# cfparse parser
-@when("I lose {euro:d} Euro", converters=dict(euro=int))
-def i_lose(euro, values):
- assert euro == values.pop(0)
-
-# regular expression parser
-@then(
- r"I should have (\d+) Euro",
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Example: by "parse"
- http://pypi.python.org/pypi/parse
-
- Provides a simple parser that replaces regular expressions for
- step parameters with a readable syntax like ``{param:Type}``.
- The syntax is inspired by the Python builtin ``string.format()``
- function.
- Step parameters must use the named fields syntax of pypi_parse_
- in step definitions. The named fields are extracted,
- optionally type converted and then used as step function arguments.
- Supports type conversions by using type converters passed via `extra_types`
-
-
-- Given File "conftest.py" with content:
-
import pytest
-from pytest_bdd import given, when, then
-from parse import Parser as parse
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-@given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-@given(parse("I have {euro:g} Euro"))
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-@when(parse("I pay {euro:d} Euro"))
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-@when(
- parse("I lose {} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-@then(
- parse(r"I should have {:d} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Example: by "cfparse"
- http://pypi.python.org/pypi/parse_type
-
- Provides an extended parser with "Cardinality Field" (CF) support.
- Automatically creates missing type converters for related cardinality
- as long as a type converter for cardinality=1 is provided.
- Supports parse expressions like:
- ``{values:Type+}`` (cardinality=1..N, many)
- ``{values:Type*}`` (cardinality=0..N, many0)
- ``{value:Type?}`` (cardinality=0..1, optional)
- Supports type conversions (as above).
-
-
-- Given File "conftest.py" with content:
-
import pytest
-from pytest_bdd import given, when, then
-from parse_type.cfparse import Parser as parse
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-@given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-@given(parse("I have {euro:Number} Euro", extra_types=dict(Number=int)))
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-@when(parse("I pay {euro:d} Euro"))
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-@when(
- parse("I lose {} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-@then(
- parse(r"I should have {:d} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Example: by "cucumber-expressions"
- https://github.com/cucumber/cucumber-expressions
- Cucumber Expressions is an alternative to Regular Expressions
- with a more intuitive syntax.
-
-
-- And File "conftest.py" with content:
-
from functools import partial
-import pytest
-from pytest_bdd import given, when, then
-from cucumber_expressions.parameter_type_registry import ParameterTypeRegistry
-from cucumber_expressions.expression import CucumberExpression
-
-parse = partial(
- CucumberExpression,
- parameter_type_registry = ParameterTypeRegistry()
-)
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-@given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-@given(
- parse("I have {int} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-@when(
- parse("I pay {} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values, request):
- assert euro == values.pop(0)
-
-@when(
- parse(r"I lose {int} Dollar/Euro(s)"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_lose(euro, values):
- assert euro == values.pop(0)
-
-@then(
- parse("I should have {int} Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Example: by "cucumber-regular-expressions"
-https://github.com/cucumber/cucumber-expressions
-
-Cucumber Expressions is an alternative
-to Regular Expressions with a more intuitive syntax.
-
-
-- And File "conftest.py" with content:
-
import pytest
-from pytest_bdd import given, when, then
-from functools import partial
-
-from cucumber_expressions.parameter_type_registry import ParameterTypeRegistry
-from cucumber_expressions.regular_expression import (
- RegularExpression as CucumberRegularExpression
-)
-
-parse = partial(
- CucumberRegularExpression,
- parameter_type_registry = ParameterTypeRegistry()
-)
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-@given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-@given(
- parse(r"I have (\d+) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-@when(
- parse("I pay (.*) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values, request):
- assert euro == values.pop(0)
-
-@when(
- parse(r"I lose (.+) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_lose(euro, values):
- assert euro == values.pop(0)
-
-@then(
- parse(r"I should have (\d+) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Example: by "regular-expressions"
-This uses full regular expressions to parse the clause text. You will
-need to use named groups "(?P<name>...)" to define the variables pulled
-from the text and passed to your "step()" function.
-Type conversion can only be done via "converters" step decorator
-argument (see example in according feature).
-
-
-- Given File "conftest.py" with content:
-
import pytest
-from pytest_bdd import given, when, then
-from re import compile as parse
-
-@pytest.fixture
-def values():
- return [6, 3, 2, 1, 999999]
-
-@given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
-def i_have_wallet(wallet):
- assert wallet == 'wallet'
-
-@given(parse(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
-def i_have(euro, values):
- assert euro == values.pop(0)
-
-@when(
- parse(r"I pay (\d+) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_pay(euro, values):
- assert euro == values.pop(0)
-
-@when(parse(r"I lose (.+) Euro"),
- anonymous_group_names=('euro',),
- converters=dict(euro=int)
-)
-def i_lose(euro, values):
- assert euro == values.pop(0)
-
-@then(parse(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
-def i_should_have(euro, values):
- assert euro == values.pop(0)
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
diff --git a/docs/features/Step definition/Parameters/Parsing.feature.rst b/docs/features/Step definition/Parameters/Parsing.feature.rst
new file mode 100644
index 00000000..2e7f63ca
--- /dev/null
+++ b/docs/features/Step definition/Parameters/Parsing.feature.rst
@@ -0,0 +1,438 @@
+Feature: Step definitions parameters parsing
+''''''''''''''''''''''''''''''''''''''''''''
+
+Step parameters often enable the reuse of steps, which can reduce the
+amount of code required. This approach allows the same step to be used
+multiple times within a single scenario, but with different arguments.
+There are multiple step parameter parsers available for your use.
+
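+As a minimal sketch (hypothetical step text), a single definition can
+serve several Gherkin lines that differ only in their argument:
+
+.. code:: python
+
+   from pytest_bdd import given
+
+   # matches "Given I have 6 Euro", "Given I have 3 Euro", ...
+   @given("I have {euro:d} Euro", converters=dict(euro=int))
+   def i_have(euro):
+       assert euro >= 0
+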
+Rule: Step definitions parameters parsing
+
+
+Background:
+
+
+- Given File "Parametrized.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: StepHandler arguments
+ Scenario: Every step takes a parameter with the same name
+ Given I have a wallet
+ Given I have 6 Euro
+ When I lose 3 Euro
+ And I pay 2 Euro
+ Then I should have 1 Euro
+ # In my dream...
+ And I should have 999999 Euro
+
+Example: Heuristic parser guesses a type and builds a particular parser to be applied
+
+
+::
+
+   Tries to select the right parser among string, cucumber_expression, cfparse and re.
+   Any object that supports the `__str__` interface and does not support the parser
+   interface will be wrapped with this parser.
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given, when, then
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ # string parser
+ @given("I have a wallet", param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+ # cucumber expressions parser
+ @given("I have {int} Euro",
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ # parse parser
+ @when(
+ "I pay {} Euro",
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ # cfparse parser
+ @when("I lose {euro:d} Euro", converters=dict(euro=int))
+ def i_lose(euro, values):
+ assert euro == values.pop(0)
+
+ # regular expression parser
+ @then(
+ r"I should have (\d+) Euro",
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Example: by "parse"
+
+
+::
+
+ http://pypi.python.org/pypi/parse
+
+ Provides a simple parser that replaces regular expressions for
+ step parameters with a readable syntax like ``{param:Type}``.
+ The syntax is inspired by the Python builtin ``string.format()``
+ function.
+ Step parameters must use the named fields syntax of pypi_parse_
+ in step definitions. The named fields are extracted,
+ optionally type converted and then used as step function arguments.
+ Supports type conversions by using type converters passed via `extra_types`
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given, when, then
+ from parse import Parser as parse
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ @given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+ @given(parse("I have {euro:g} Euro"))
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ @when(parse("I pay {euro:d} Euro"))
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ @when(
+ parse("I lose {} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ @then(
+ parse(r"I should have {:d} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Example: by "cfparse"
+
+
+::
+
+ http://pypi.python.org/pypi/parse_type
+
+ Provides an extended parser with "Cardinality Field" (CF) support.
+ Automatically creates missing type converters for related cardinality
+ as long as a type converter for cardinality=1 is provided.
+ Supports parse expressions like:
+ ``{values:Type+}`` (cardinality=1..N, many)
+ ``{values:Type*}`` (cardinality=0..N, many0)
+ ``{value:Type?}`` (cardinality=0..1, optional)
+ Supports type conversions (as above).
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given, when, then
+ from parse_type.cfparse import Parser as parse
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ @given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+ @given(parse("I have {euro:Number} Euro", extra_types=dict(Number=int)))
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ @when(parse("I pay {euro:d} Euro"))
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ @when(
+ parse("I lose {} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ @then(
+ parse(r"I should have {:d} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Example: by "cucumber-expressions"
+
+
+::
+
+ https://github.com/cucumber/cucumber-expressions
+ Cucumber Expressions is an alternative to Regular Expressions
+ with a more intuitive syntax.
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from functools import partial
+ import pytest
+ from pytest_bdd import given, when, then
+ from cucumber_expressions.parameter_type_registry import ParameterTypeRegistry
+ from cucumber_expressions.expression import CucumberExpression
+
+ parse = partial(
+ CucumberExpression,
+ parameter_type_registry = ParameterTypeRegistry()
+ )
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ @given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+ @given(
+ parse("I have {int} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ @when(
+ parse("I pay {} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values, request):
+ assert euro == values.pop(0)
+
+ @when(
+ parse(r"I lose {int} Dollar/Euro(s)"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_lose(euro, values):
+ assert euro == values.pop(0)
+
+ @then(
+ parse("I should have {int} Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Example: by "cucumber-regular-expressions"
+
+
+::
+
+ https://github.com/cucumber/cucumber-expressions
+
+ Cucumber Expressions is an alternative
+ to Regular Expressions with a more intuitive syntax.
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given, when, then
+ from functools import partial
+
+ from cucumber_expressions.parameter_type_registry import ParameterTypeRegistry
+ from cucumber_expressions.regular_expression import (
+ RegularExpression as CucumberRegularExpression
+ )
+
+ parse = partial(
+ CucumberRegularExpression,
+ parameter_type_registry = ParameterTypeRegistry()
+ )
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ @given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+ @given(
+ parse(r"I have (\d+) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ @when(
+ parse("I pay (.*) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values, request):
+ assert euro == values.pop(0)
+
+ @when(
+ parse(r"I lose (.+) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_lose(euro, values):
+ assert euro == values.pop(0)
+
+ @then(
+ parse(r"I should have (\d+) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Example: by "regular-expressions"
+
+
+::
+
+   This uses full regular expressions to parse the clause text. You will
+   need to use named groups "(?P<name>...)" to define the variables pulled
+   from the text and passed to your "step()" function.
+   Type conversion can only be done via the "converters" step decorator
+   argument (see the example in the corresponding feature).
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ import pytest
+ from pytest_bdd import given, when, then
+ from re import compile as parse
+
+ @pytest.fixture
+ def values():
+ return [6, 3, 2, 1, 999999]
+
+ @given(parse("I have a wallet"), param_defaults={'wallet': 'wallet'})
+ def i_have_wallet(wallet):
+ assert wallet == 'wallet'
+
+      @given(parse(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
+ def i_have(euro, values):
+ assert euro == values.pop(0)
+
+ @when(
+ parse(r"I pay (\d+) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_pay(euro, values):
+ assert euro == values.pop(0)
+
+ @when(parse(r"I lose (.+) Euro"),
+ anonymous_group_names=('euro',),
+ converters=dict(euro=int)
+ )
+ def i_lose(euro, values):
+ assert euro == values.pop(0)
+
+      @then(parse(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
+ def i_should_have(euro, values):
+ assert euro == values.pop(0)
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Pytest fixtures substitution.feature.html b/docs/features/Step definition/Pytest fixtures substitution.feature.html
deleted file mode 100644
index 8514a353..00000000
--- a/docs/features/Step definition/Pytest fixtures substitution.feature.html
+++ /dev/null
@@ -1,77 +0,0 @@
-Feature: Step definition could use pytest fixtures as step parameters
-Test setup is implemented within the Given section. Even though these steps
-are executed imperatively to apply possible side-effects, pytest-bdd-ng is trying
-to benefit of the PyTest fixtures which is based on the dependency injection
-and makes the setup more declarative style.
-In pytest-bdd-ng you just declare an argument of the step function that it depends on
-and the PyTest will make sure to provide it.
-Scenario:
-
--
-
Given File "conftest.py" with content:
-from pytest import fixture
-from pytest_bdd import given, when, then
-
-@fixture
-def pocket():
- yield [{"cherry": "delicious"}]
-
-@given("I have an old pickle", param_defaults={"age": "old"}, target_fixture='pickle_age', params_fixtures_mapping=False)
-def i_have_cucumber(pocket):
- pocket.append({"age": "old", "cucumber": "pickle"})
-
-@when("I check pocket I found cucumber there")
-def i_check_pocket_for_cucumber(pocket):
- assert any(filter(lambda item: "cucumber" in item.keys(), pocket))
-
-@then("I lost everything")
-def i_check_pocket_for_cucumber(pocket):
- while pocket:
- pocket.pop()
-
-
--
-
Given File "Cucumber.feature" with content:
-Feature:
- Scenario:
- Given I have an old pickle
- When I check pocket I found cucumber there
- Then I lost everything
-
-
--
-
Given File "test_freshness.py" with content:
-from pytest_bdd import scenario
-
-@scenario("Cucumber.feature")
-def test_passing_feature(pocket):
- assert not pocket
-
-
--
-
When run pytest
-
-
-
-cli_args |
---disable-feature-autoload |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
diff --git a/docs/features/Step definition/Pytest fixtures substitution.feature.rst b/docs/features/Step definition/Pytest fixtures substitution.feature.rst
new file mode 100644
index 00000000..604a77c2
--- /dev/null
+++ b/docs/features/Step definition/Pytest fixtures substitution.feature.rst
@@ -0,0 +1,72 @@
+Feature: Step definition could use pytest fixtures as step parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Test setup is implemented within the Given section. Even though these
+steps are executed imperatively to apply possible side effects,
+pytest-bdd-ng tries to benefit from pytest fixtures, which are based on
+dependency injection and make the setup more declarative.
+
+In pytest-bdd-ng you just declare an argument of the step function that
+it depends on, and pytest will make sure to provide it.
+
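+A minimal sketch of the idea (hypothetical fixture and step names): the
+step function simply declares the fixture it depends on as an argument.
+
+.. code:: python
+
+   from pytest import fixture
+   from pytest_bdd import given
+
+   @fixture
+   def basket():
+       return []
+
+   @given("I pick a cucumber")
+   def pick_cucumber(basket):
+       # "basket" is resolved by pytest's dependency injection
+       basket.append("cucumber")
+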
+Scenario:
+'''''''''
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest import fixture
+ from pytest_bdd import given, when, then
+
+ @fixture
+ def pocket():
+ yield [{"cherry": "delicious"}]
+
+ @given("I have an old pickle", param_defaults={"age": "old"}, target_fixture='pickle_age', params_fixtures_mapping=False)
+ def i_have_cucumber(pocket):
+ pocket.append({"age": "old", "cucumber": "pickle"})
+
+ @when("I check pocket I found cucumber there")
+ def i_check_pocket_for_cucumber(pocket):
+ assert any(filter(lambda item: "cucumber" in item.keys(), pocket))
+
+ @then("I lost everything")
+ def i_check_pocket_for_cucumber(pocket):
+ while pocket:
+ pocket.pop()
+
+- Given File "Cucumber.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have an old pickle
+ When I check pocket I found cucumber there
+ Then I lost everything
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import scenario
+
+ @scenario("Cucumber.feature")
+ def test_passing_feature(pocket):
+ assert not pocket
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step definition/Target fixtures specification.feature.html b/docs/features/Step definition/Target fixtures specification.feature.html
deleted file mode 100644
index 6016042c..00000000
--- a/docs/features/Step definition/Target fixtures specification.feature.html
+++ /dev/null
@@ -1,117 +0,0 @@
-Feature: Step definition could override or setup new fixture
-Dependency injection is not a panacea if you have complex structure of your test setup data.
-Sometimes there's a need such a given step which would imperatively change
-the fixture only for certain test (scenario), while for other tests
-it will stay untouched. To allow this, special parameter target_fixture
exists in the decorator:
-Scenario: Single fixture injection
-
--
-
Given File "conftest.py" with content:
-from pytest_bdd import given
-
-@given("I have an old pickle", param_defaults={"age": "old"}, target_fixture='pickle_age', params_fixtures_mapping=False)
-def i_have_cucumber(age):
- yield age
-
-
--
-
Given File "Freshness.feature" with content:
-Feature:
- Scenario:
- Given I have an old pickle
-
-
--
-
Given File "test_freshness.py" with content:
-from pytest_bdd import scenario
-
-@scenario("Freshness.feature")
-def test_passing_feature(pickle_age, request):
- assert pickle_age == 'old'
- assert request.getfixturevalue('pickle_age') == pickle_age
-
-
--
-
When run pytest
-
-
-
-cli_args |
---disable-feature-autoload |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Scenario: Multiple fixtures injection
-
--
-
Given File "conftest.py" with content:
-from pytest_bdd import given
-
-@given("I have an old pickle", target_fixtures=['pickle_age', 'cucumber_kind'])
-def i_have_cucumber():
- yield ['old', 'pickle']
-
-
--
-
Given File "Freshness.feature" with content:
-Feature:
- Scenario:
- Given I have an old pickle
-
-
--
-
Given File "test_freshness.py" with content:
-from pytest_bdd import scenario
-
-@scenario("Freshness.feature")
-def test_passing_feature(request, pickle_age, cucumber_kind):
- assert pickle_age == 'old'
- assert cucumber_kind == 'pickle'
- assert request.getfixturevalue('pickle_age') == pickle_age
- assert request.getfixturevalue('cucumber_kind') == cucumber_kind
-
-
--
-
When run pytest
-
-
-
-cli_args |
---disable-feature-autoload |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
diff --git a/docs/features/Step definition/Target fixtures specification.feature.rst b/docs/features/Step definition/Target fixtures specification.feature.rst
new file mode 100644
index 00000000..fcca2221
--- /dev/null
+++ b/docs/features/Step definition/Target fixtures specification.feature.rst
@@ -0,0 +1,104 @@
+Feature: Step definition could override or setup new fixture
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Dependency injection is not a panacea if your test setup data has a
+complex structure. Sometimes you need a given step that imperatively
+changes a fixture only for a certain test (scenario), while for other
+tests it stays untouched. To allow this, the decorator accepts the
+special parameter ``target_fixture``, as sketched below:
+
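+A minimal sketch (hypothetical names): the value returned or yielded by
+the step function becomes available under the name passed to
+``target_fixture``.
+
+.. code:: python
+
+   from pytest_bdd import given
+
+   @given("I have a pickle", target_fixture="snack")
+   def i_have_pickle():
+       # other steps and the test function can now request "snack"
+       return "pickle"
+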
+Scenario: Single fixture injection
+''''''''''''''''''''''''''''''''''
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given("I have an old pickle", param_defaults={"age": "old"}, target_fixture='pickle_age', params_fixtures_mapping=False)
+ def i_have_cucumber(age):
+ yield age
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have an old pickle
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import scenario
+
+ @scenario("Freshness.feature")
+ def test_passing_feature(pickle_age, request):
+ assert pickle_age == 'old'
+ assert request.getfixturevalue('pickle_age') == pickle_age
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: Multiple fixtures injection
+'''''''''''''''''''''''''''''''''''''
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given("I have an old pickle", target_fixtures=['pickle_age', 'cucumber_kind'])
+ def i_have_cucumber():
+ yield ['old', 'pickle']
+
+- Given File "Freshness.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I have an old pickle
+
+- Given File "test_freshness.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import scenario
+
+ @scenario("Freshness.feature")
+ def test_passing_feature(request, pickle_age, cucumber_kind):
+ assert pickle_age == 'old'
+ assert cucumber_kind == 'pickle'
+ assert request.getfixturevalue('pickle_age') == pickle_age
+ assert request.getfixturevalue('cucumber_kind') == cucumber_kind
+
+- When run pytest
+
+   +----------------------------+
+   | cli_args                   |
+   +============================+
+   | --disable-feature-autoload |
+   +----------------------------+
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/Step/Data table.feature.html b/docs/features/Step/Data table.feature.html
deleted file mode 100644
index 8b24c898..00000000
--- a/docs/features/Step/Data table.feature.html
+++ /dev/null
@@ -1,44 +0,0 @@
-Feature: Steps could have docstrings
-Scenario:
-
-- Given File "Steps.feature" with content:
-
Feature:
- Scenario:
- Given I check step datatable
- |first|second|
- | a| b|
-
-
-
-- And File "conftest.py" with content:
-
from pytest_bdd import given
-from messages import Step
-
-def get_datatable_row_values(row):
- return list(map(lambda cell: cell.value, row.cells))
-
-@given('I check step datatable')
-def _(step: Step):
- title_row, *data_rows = step.data_table.rows
- assert get_datatable_row_values(title_row) == ["first", "second"]
- assert get_datatable_row_values(data_rows[0]) == ["a", "b"]
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-failed |
-
-
-
-
-1 |
-0 |
-
-
-
-
-
diff --git a/docs/features/Step/Data table.feature.rst b/docs/features/Step/Data table.feature.rst
new file mode 100644
index 00000000..99e48aa3
--- /dev/null
+++ b/docs/features/Step/Data table.feature.rst
@@ -0,0 +1,41 @@
+Feature: Steps could have data tables
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Scenario:
+'''''''''
+
+- Given File "Steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I check step datatable
+ |first|second|
+ | a| b|
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+ from messages import Step
+
+ def get_datatable_row_values(row):
+ return list(map(lambda cell: cell.value, row.cells))
+
+ @given('I check step datatable')
+ def _(step: Step):
+ title_row, *data_rows = step.data_table.rows
+ assert get_datatable_row_values(title_row) == ["first", "second"]
+ assert get_datatable_row_values(data_rows[0]) == ["a", "b"]
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Step/Doc string.feature.html b/docs/features/Step/Doc string.feature.html
deleted file mode 100644
index 52ae6bae..00000000
--- a/docs/features/Step/Doc string.feature.html
+++ /dev/null
@@ -1,38 +0,0 @@
-Feature: Steps could have docstrings
-Scenario:
-
-- Given File "Steps.feature" with content:
-
Feature:
- Scenario:
- Given I check step docstring
- ```
- Step docstring
- ```
-
-
-- And File "conftest.py" with content:
-
from pytest_bdd import given
-
-@given('I check step docstring')
-def _(step):
- assert step.doc_string.content == "Step docstring"
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-failed |
-
-
-
-
-1 |
-0 |
-
-
-
-
-
diff --git a/docs/features/Step/Doc string.feature.rst b/docs/features/Step/Doc string.feature.rst
new file mode 100644
index 00000000..304e2b51
--- /dev/null
+++ b/docs/features/Step/Doc string.feature.rst
@@ -0,0 +1,36 @@
+Feature: Steps could have docstrings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Scenario:
+'''''''''
+
+- Given File "Steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature:
+ Scenario:
+ Given I check step docstring
+ ```
+ Step docstring
+ ```
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given
+
+ @given('I check step docstring')
+ def _(step):
+ assert step.doc_string.content == "Step docstring"
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Step/Step definition bounding.feature.html b/docs/features/Step/Step definition bounding.feature.html
deleted file mode 100644
index 9e457661..00000000
--- a/docs/features/Step/Step definition bounding.feature.html
+++ /dev/null
@@ -1,264 +0,0 @@
-Feature: Gherkin steps bounding to steps definitions
-Scenario: Steps are executed by corresponding step keyword decorator
-
-- Given File "steps.feature" with content:
-
Feature: Steps are executed by corresponding step keyword decorator
-
- Scenario:
- Step execution definitions are pytest fixtures by their nature
- and are stored at pytest "conftest.py" files (or any other place
- where pytest fixtures could be placed)
-
- * Step is executed by plain step decorator
- Given Step is executed by given step decorator
- When Step is executed by when step decorator
- Then Step is executed by then step decorator
-
- Then there are passed steps by kind:
- |step|given|when|then|
- | 1| 1| 1| 1|
-
-
-- And File "conftest.py" with content:
-
from pytest_bdd import given, when, then, step
-from pytest import fixture
-
-# pytest fixtures could be used from step definitions, so some
-# test preconditions could be stored on the pytest level
-@fixture
-def step_counter():
- yield {'step': 0, 'given': 0,'when': 0,'then': 0,}
-
-# Step with any kind of keyword could be bounded to step decorated with "step" definition
-@step('Step is executed by plain step decorator')
-def plain_step(step_counter):
- step_counter['step'] += 1
-
-# Step with "Given" keyword could be bounded to step decorated with "given" definition
-@given('Step is executed by given step decorator')
-def given_step(step_counter):
- step_counter['given'] += 1
-
-# Same as "given"
-@when('Step is executed by when step decorator')
-def when_step(step_counter):
- step_counter['when'] += 1
-
-# Same as "given"
-@then('Step is executed by then step decorator')
-def then_step(step_counter):
- step_counter['then'] += 1
-
-@then('there are passed steps by kind:')
-def check_step_counter(step, step_counter):
- # Step datatables data could be accessed in the next manner
- step_data_table = step.data_table
- oracle_results_header = [cell.value for cell in step_data_table.rows[0].cells]
- oracle_results_values = [int(cell.value) for cell in step_data_table.rows[1].cells]
- oracle_result = dict(zip(oracle_results_header, oracle_results_values))
-
- assert oracle_result == step_counter
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Scenario: Steps could be executed by aliased step keyword decorator
-Could be useful to declare the same fixtures or steps with
-different names for better readability. In order to use the same step
-function with multiple step names simply decorate it multiple times.
-
-
-- Given File "steps.feature" with content:
-
Feature: Steps could be executed by aliased step keyword decorator
- Scenario:
- Given Step counter
-
- * Step is executed by aliased step decorator
- Given Step is executed by aliased step decorator
- When Step is executed by aliased step decorator
- Then Step is executed by aliased step decorator
-
- Then there are "4" passed aliased steps
-
-
-- And File "conftest.py" with content:
-
from pytest_bdd import given, when, then, step
-
-@given('Step counter', target_fixture='step_counter')
-def step_counter():
- yield {'steps_count': 0}
-
-@step('Step is executed by aliased step decorator')
-@given('Step is executed by aliased step decorator')
-@when('Step is executed by aliased step decorator')
-@then('Step is executed by aliased step decorator')
-def aliased_step(step_counter):
- step_counter['steps_count'] += 1
-
-@then(
- 'there are "{int}" passed aliased steps',
- anonymous_group_names=('oracle_steps',),
-)
-def then_step(step_counter, oracle_steps):
- assert step_counter['steps_count'] == oracle_steps
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Rule: Steps could be executed by liberal step keyword decorator
-Step definition decorator could be "liberal"
-- so it could be bound to any kind of keyword
-
-Background:
-
-Scenario: Same step is used with different keywords
-
-- Given File "conftest.py" with content:
-
from pytest_bdd import given, when, then, step
-
-@given('Step counter', target_fixture='step_counter')
-def step_counter():
- yield {'steps_count': 0}
-
-@step('Step is executed by liberal step decorator', liberal=True)
-@given('Step is executed by liberal given decorator', liberal=True)
-@when('Step is executed by liberal when decorator', liberal=True)
-@then('Step is executed by liberal then decorator', liberal=True)
-def liberal_step(step_counter):
- step_counter['steps_count'] += 1
-
-@then(
- 'there are "{int}" passed liberal steps',
- anonymous_group_names=('oracle_steps',),
-)
-def then_step(step_counter, oracle_steps):
- assert step_counter['steps_count'] == oracle_steps
-
-
-- When run pytest
-- Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-
-
-
-
-1 |
-
-
-
-
-
-Scenario: Keyworded steps could be treated as liberal by pytest command line option
-
--
-
Given File "conftest.py" with content:
-from pytest_bdd import given, when, then, step
-
-@given('Step counter', target_fixture='step_counter')
-def step_counter():
- yield {'steps_count': 0}
-
-@step('Step is executed by liberal step decorator')
-@given('Step is executed by liberal given decorator')
-@when('Step is executed by liberal when decorator')
-@then('Step is executed by liberal then decorator')
-def liberal_step(step_counter):
- step_counter['steps_count'] += 1
-
-@then(
- 'there are "{int}" passed liberal steps',
- anonymous_group_names=('oracle_steps',),
-)
-def then_step(step_counter, oracle_steps):
- assert step_counter['steps_count'] == oracle_steps
-
-
--
-
When run pytest
-
-
-
-cli_args |
---liberal-steps |
-
-
-
-
--
-
Then pytest outcome must contain tests with statuses:
-
-
-
-passed |
-failed |
-
-
-
-
-1 |
-0 |
-
-
-
-
-
diff --git a/docs/features/Step/Step definition bounding.feature.rst b/docs/features/Step/Step definition bounding.feature.rst
new file mode 100644
index 00000000..71290ec2
--- /dev/null
+++ b/docs/features/Step/Step definition bounding.feature.rst
@@ -0,0 +1,256 @@
+Feature: Gherkin steps bounding to steps definitions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Scenario: Steps are executed by corresponding step keyword decorator
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Steps are executed by corresponding step keyword decorator
+
+ Scenario:
+ Step execution definitions are pytest fixtures by their nature
+ and are stored at pytest "conftest.py" files (or any other place
+ where pytest fixtures could be placed)
+
+ * Step is executed by plain step decorator
+ Given Step is executed by given step decorator
+ When Step is executed by when step decorator
+ Then Step is executed by then step decorator
+
+ Then there are passed steps by kind:
+ |step|given|when|then|
+ | 1| 1| 1| 1|
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given, when, then, step
+ from pytest import fixture
+
+ # pytest fixtures could be used from step definitions, so some
+ # test preconditions could be stored on the pytest level
+ @fixture
+ def step_counter():
+ yield {'step': 0, 'given': 0,'when': 0,'then': 0,}
+
+ # Step with any kind of keyword could be bounded to step decorated with "step" definition
+ @step('Step is executed by plain step decorator')
+ def plain_step(step_counter):
+ step_counter['step'] += 1
+
+ # Step with "Given" keyword could be bounded to step decorated with "given" definition
+ @given('Step is executed by given step decorator')
+ def given_step(step_counter):
+ step_counter['given'] += 1
+
+ # Same as "given"
+ @when('Step is executed by when step decorator')
+ def when_step(step_counter):
+ step_counter['when'] += 1
+
+ # Same as "given"
+ @then('Step is executed by then step decorator')
+ def then_step(step_counter):
+ step_counter['then'] += 1
+
+ @then('there are passed steps by kind:')
+ def check_step_counter(step, step_counter):
+ # Step datatables data could be accessed in the next manner
+ step_data_table = step.data_table
+ oracle_results_header = [cell.value for cell in step_data_table.rows[0].cells]
+ oracle_results_values = [int(cell.value) for cell in step_data_table.rows[1].cells]
+ oracle_result = dict(zip(oracle_results_header, oracle_results_values))
+
+ assert oracle_result == step_counter
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: Steps could be executed by aliased step keyword decorator
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+::
+
+ Could be useful to declare the same fixtures or steps with
+ different names for better readability. In order to use the same step
+ function with multiple step names simply decorate it multiple times.
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Steps could be executed by aliased step keyword decorator
+ Scenario:
+ Given Step counter
+
+ * Step is executed by aliased step decorator
+ Given Step is executed by aliased step decorator
+ When Step is executed by aliased step decorator
+ Then Step is executed by aliased step decorator
+
+ Then there are "4" passed aliased steps
+
+- And File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given, when, then, step
+
+ @given('Step counter', target_fixture='step_counter')
+ def step_counter():
+ yield {'steps_count': 0}
+
+ @step('Step is executed by aliased step decorator')
+ @given('Step is executed by aliased step decorator')
+ @when('Step is executed by aliased step decorator')
+ @then('Step is executed by aliased step decorator')
+ def aliased_step(step_counter):
+ step_counter['steps_count'] += 1
+
+ @then(
+ 'there are "{int}" passed aliased steps',
+ anonymous_group_names=('oracle_steps',),
+ )
+ def then_step(step_counter, oracle_steps):
+ assert step_counter['steps_count'] == oracle_steps
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Rule: Steps could be executed by liberal step keyword decorator
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+::
+
+ Step definition decorator could be "liberal"
+ - so it could be bound to any kind of keyword
+
+Background:
+
+
+- Given File "steps.feature" with content:
+
+ .. code:: gherkin
+
+ Feature: Steps could be executed by liberal step keyword decorator
+ Scenario:
+ Given Step counter
+
+ * Step is executed by liberal step decorator
+ Given Step is executed by liberal step decorator
+ When Step is executed by liberal step decorator
+ Then Step is executed by liberal step decorator
+
+ * Step is executed by liberal given decorator
+ Given Step is executed by liberal given decorator
+ When Step is executed by liberal given decorator
+ Then Step is executed by liberal given decorator
+
+ * Step is executed by liberal when decorator
+ Given Step is executed by liberal when decorator
+ When Step is executed by liberal when decorator
+ Then Step is executed by liberal when decorator
+
+ * Step is executed by liberal then decorator
+ Given Step is executed by liberal then decorator
+ When Step is executed by liberal then decorator
+ Then Step is executed by liberal then decorator
+
+ Then there are "16" passed liberal steps
+
+Scenario: Same step is used with different keywords
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given, when, then, step
+
+ @given('Step counter', target_fixture='step_counter')
+ def step_counter():
+ yield {'steps_count': 0}
+
+ @step('Step is executed by liberal step decorator', liberal=True)
+ @given('Step is executed by liberal given decorator', liberal=True)
+ @when('Step is executed by liberal when decorator', liberal=True)
+ @then('Step is executed by liberal then decorator', liberal=True)
+ def liberal_step(step_counter):
+ step_counter['steps_count'] += 1
+
+ @then(
+ 'there are "{int}" passed liberal steps',
+ anonymous_group_names=('oracle_steps',),
+ )
+ def then_step(step_counter, oracle_steps):
+ assert step_counter['steps_count'] == oracle_steps
+
+- When run pytest
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
+
+Scenario: Keyworded steps could be treated as liberal by pytest command line option
+
+
+- Given File "conftest.py" with content:
+
+ .. code:: python
+
+ from pytest_bdd import given, when, then, step
+
+ @given('Step counter', target_fixture='step_counter')
+ def step_counter():
+ yield {'steps_count': 0}
+
+ @step('Step is executed by liberal step decorator')
+ @given('Step is executed by liberal given decorator')
+ @when('Step is executed by liberal when decorator')
+ @then('Step is executed by liberal then decorator')
+ def liberal_step(step_counter):
+ step_counter['steps_count'] += 1
+
+ @then(
+ 'there are "{int}" passed liberal steps',
+ anonymous_group_names=('oracle_steps',),
+ )
+ def then_step(step_counter, oracle_steps):
+ assert step_counter['steps_count'] == oracle_steps
+
+- When run pytest
+
+ ======== ===============
+ cli_args --liberal-steps
+ ======== ===============
+ ======== ===============
+
+- Then pytest outcome must contain tests with statuses:
+
+ ====== ======
+ passed failed
+ ====== ======
+ 1 0
+ ====== ======
diff --git a/docs/features/Tutorial/Launch.feature.html b/docs/features/Tutorial/Launch.feature.html
deleted file mode 100644
index eaaf4b0b..00000000
--- a/docs/features/Tutorial/Launch.feature.html
+++ /dev/null
@@ -1,36 +0,0 @@
-Feature: Simple project tests that use pytest-bdd-ng could be run via pytest
-Project per se: https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/docs/tutorial
-
-Scenario: Catalog example with simplest steps
-
diff --git a/docs/features/Tutorial/Launch.feature.rst b/docs/features/Tutorial/Launch.feature.rst
new file mode 100644
index 00000000..e9f8fbfc
--- /dev/null
+++ b/docs/features/Tutorial/Launch.feature.rst
@@ -0,0 +1,26 @@
+Feature: Simple project tests that use pytest-bdd-ng could be run via pytest
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ Project per se: https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/docs/tutorial
+
+Scenario: Catalog example with simplest steps
+'''''''''''''''''''''''''''''''''''''''''''''
+
+- Given Copy path from "docs/tutorial" to test path "tutorial"
+
+- When run pytest
+
+ ======== ================== ==============
+ cli_args --rootdir=tutorial tutorial/tests
+ ======== ================== ==============
+ ======== ================== ==============
+
+- Then pytest outcome must contain tests with statuses:
+
+ +--------+
+ | passed |
+ +========+
+ | 1 |
+ +--------+
diff --git a/docs/features/features.rst b/docs/features/features.rst
index ef755c4b..99f4cfff 100644
--- a/docs/features/features.rst
+++ b/docs/features/features.rst
@@ -1,173 +1,150 @@
-Features
-========
-
-.. NOTE:: Features below are part of end-to-end test suite; You always could find most specific
- use cases of **pytest-bdd-ng** by investigation of its regression
- test suite https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/tests
-
-
-
-Tutorial
---------
-
-Launch.feature
-##############
-
-.. raw:: html
- :file: ../features/Tutorial/Launch.feature.html
-
-Step definition
----------------
-
-Pytest fixtures substitution.feature
-####################################
-
-.. raw:: html
- :file: ../features/Step definition/Pytest fixtures substitution.feature.html
-
-Target fixtures specification.feature
-#####################################
-
-.. raw:: html
- :file: ../features/Step definition/Target fixtures specification.feature.html
-
-Parameters
-##########
-
-Conversion.feature
-!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Step definition/Parameters/Conversion.feature.html
-
-Defaults.feature
-!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Step definition/Parameters/Defaults.feature.html
-
-Injection as fixtures.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Step definition/Parameters/Injection as fixtures.feature.html
-
-Parsing by custom parser.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Step definition/Parameters/Parsing by custom parser.feature.html
-
-Parsing.feature
-!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Step definition/Parameters/Parsing.feature.html
-
-Step
-----
-
-Data table.feature
-##################
-
-.. raw:: html
- :file: ../features/Step/Data table.feature.html
-
-Doc string.feature
-##################
-
-.. raw:: html
- :file: ../features/Step/Doc string.feature.html
-
-Step definition bounding.feature
-################################
-
-.. raw:: html
- :file: ../features/Step/Step definition bounding.feature.html
-
-Scenario
---------
-
-Description.feature
-###################
-
-.. raw:: html
- :file: ../features/Scenario/Description.feature.html
-
-Tag.feature
-###########
-
-.. raw:: html
- :file: ../features/Scenario/Tag.feature.html
-
-Outline
-#######
-
-Examples Tag.feature
-!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Scenario/Outline/Examples Tag.feature.html
-
-Report
-------
-
-Gathering.feature
-#################
-
-.. raw:: html
- :file: ../features/Report/Gathering.feature.html
-
-Feature
--------
-
-Description.feature
-###################
-
-.. raw:: html
- :file: ../features/Feature/Description.feature.html
-
-Localization.feature
-####################
-
-.. raw:: html
- :file: ../features/Feature/Localization.feature.html
-
-Tag conversion.feature
-######################
-
-.. raw:: html
- :file: ../features/Feature/Tag conversion.feature.html
-
-Tag.feature
-###########
-
-.. raw:: html
- :file: ../features/Feature/Tag.feature.html
-
-Load
-####
-
-Autoload.feature
-!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Feature/Load/Autoload.feature.html
-
-Scenario function loader.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Feature/Load/Scenario function loader.feature.html
-
-Scenario search from base directory.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Feature/Load/Scenario search from base directory.feature.html
-
-Scenario search from base url.feature
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-.. raw:: html
- :file: ../features/Feature/Load/Scenario search from base url.feature.html
+Features
+========
+
+.. NOTE:: Features below are part of end-to-end test suite; You always could find most specific
+ use cases of **pytest-bdd-ng** by investigation of its regression
+ test suite https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/tests
+
+
+
+Tutorial
+--------
+
+Launch
+######
+
+.. include:: features/Tutorial/Launch.feature.rst
+
+Step definition
+---------------
+
+Pytest fixtures substitution
+############################
+
+.. include:: features/Step definition/Pytest fixtures substitution.feature.rst
+
+Target fixtures specification
+#############################
+
+.. include:: features/Step definition/Target fixtures specification.feature.rst
+
+Parameters
+##########
+
+Conversion
+!!!!!!!!!!
+
+.. include:: features/Step definition/Parameters/Conversion.feature.rst
+
+Defaults
+!!!!!!!!
+
+.. include:: features/Step definition/Parameters/Defaults.feature.rst
+
+Injection as fixtures
+!!!!!!!!!!!!!!!!!!!!!
+
+.. include:: features/Step definition/Parameters/Injection as fixtures.feature.rst
+
+Parsing by custom parser
+!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. include:: features/Step definition/Parameters/Parsing by custom parser.feature.rst
+
+Parsing
+!!!!!!!
+
+.. include:: features/Step definition/Parameters/Parsing.feature.rst
+
+Step
+----
+
+Data table
+##########
+
+.. include:: features/Step/Data table.feature.rst
+
+Doc string
+##########
+
+.. include:: features/Step/Doc string.feature.rst
+
+Step definition bounding
+########################
+
+.. include:: features/Step/Step definition bounding.feature.rst
+
+Scenario
+--------
+
+Description
+###########
+
+.. include:: features/Scenario/Description.feature.rst
+
+Tag
+###
+
+.. include:: features/Scenario/Tag.feature.rst
+
+Outline
+#######
+
+Examples Tag
+!!!!!!!!!!!!
+
+.. include:: features/Scenario/Outline/Examples Tag.feature.rst
+
+Report
+------
+
+Gathering
+#########
+
+.. include:: features/Report/Gathering.feature.rst
+
+Feature
+-------
+
+Description
+###########
+
+.. include:: features/Feature/Description.feature.rst
+
+Localization
+############
+
+.. include:: features/Feature/Localization.feature.rst
+
+Tag conversion
+##############
+
+.. include:: features/Feature/Tag conversion.feature.rst
+
+Tag
+###
+
+.. include:: features/Feature/Tag.feature.rst
+
+Load
+####
+
+Autoload
+!!!!!!!!
+
+.. include:: features/Feature/Load/Autoload.feature.rst
+
+Scenario function loader
+!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. include:: features/Feature/Load/Scenario function loader.feature.rst
+
+Scenario search from base directory
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. include:: features/Feature/Load/Scenario search from base directory.feature.rst
+
+Scenario search from base url
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+.. include:: features/Feature/Load/Scenario search from base url.feature.rst
diff --git a/docs/index.rst b/docs/index.rst
index 3abcff1d..510198dc 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -4,5 +4,3 @@ Welcome to Pytest-BDD-NextGeneration's documentation!
:glob:
include.rst
-
-.. include:: include
diff --git a/features/Feature/Description.feature.md b/features/Feature/Description.feature.md
index bf3213df..8b7448b9 100644
--- a/features/Feature/Description.feature.md
+++ b/features/Feature/Description.feature.md
@@ -5,7 +5,6 @@
## Scenario:
* Given File "Description.feature" with content:
-
```gherkin
Feature:
My Feature description
@@ -13,7 +12,6 @@
Given I check feature description
```
* And File "conftest.py" with content:
-
```python
from pytest_bdd import given
@@ -23,7 +21,6 @@
```
* When run pytest
* Then pytest outcome must contain tests with statuses:
-
| passed | failed |
|--------|--------|
| 1 | 0 |
diff --git a/features/Feature/Load/Autoload.feature.md b/features/Feature/Load/Autoload.feature.md
index 9ccd98a8..7b5ba1c6 100644
--- a/features/Feature/Load/Autoload.feature.md
+++ b/features/Feature/Load/Autoload.feature.md
@@ -20,6 +20,7 @@
* Given Install npm packages
| packages | @cucumber/gherkin |
|----------|-------------------|
+
* Given File "conftest.py" with content:
```python
from pytest_bdd import step
diff --git a/pyproject.toml b/pyproject.toml
index 41433ad2..d741ca99 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,179 +1,189 @@
-[build-system]
-build-backend = "setuptools.build_meta"
-requires = ["setuptools<68", "wheel"]
-
-[project]
-authors = [
- {name = "Oleg Pidsadnyi", email = "oleg.pidsadnyi@gmail.com"},
- {name = "Anatoly Bubenkov", email = "bubenkoff@gmail.com"},
- {name = "Adam Coddington", email = "me@adamcoddington.net"},
- {name = "Albert-Jan Nijburg", email = "albertjan@curit.com"},
- {name = "Alessio Bogon", email = "youtux@gmail.com"},
- {name = "Andrey Makhnach", email = "andrey.makhnach@gmail.com"},
- {name = "Aron Curzon", email = "curzona@gmail.com"},
- {name = "Dmitrijs Milajevs", email = "dimazest@gmail.com"},
- {name = "Dmitry Kolyagin", email = "pauk-slon@users.noreply.github.com"},
- {name = "Florian Bruhin", email = "me@the-compiler.org"},
- {name = "Floris Bruynooghe", email = "flub@devork.be"},
- {name = "Harro van der Klauw", email = "hvdklauw@gmail.com"},
- {name = "Hugo van Kemenade", email = "hugovk@users.noreply.github.com"},
- {name = "Konstantin Goloveshko", email = "kostya.goloveshko@gmail.com"},
- {name = "Laurence Rowe", email = "l@lrowe.co.uk"},
- {name = "Leonardo Santagada", email = "santagada@github.com"},
- {name = "Milosz Sliwinski", email = "sliwinski.milosz@gmail.com"},
- {name = "Michiel Holtkamp", email = "github@elfstone.nl"},
- {name = "Robin Pedersen", email = "ropez@github.com"},
- {name = "Sergey Kraynev", email = "sergejyit@gmail.com"}
-]
-classifiers = [
- "Development Status :: 4 - Beta",
- "Framework :: Pytest",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: MIT License",
- "Operating System :: POSIX",
- "Operating System :: Microsoft :: Windows",
- "Operating System :: MacOS :: MacOS X",
- "Topic :: Software Development :: Testing",
- "Topic :: Software Development :: Libraries",
- "Topic :: Utilities",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
- "Programming Language :: Python :: 3.12",
- "Programming Language :: Python :: 3.13"
-]
-dependencies = [
- "aiohttp",
- "attrs",
- "certifi",
- "chevron",
- "ci-environment",
- # This is needed until messages become regular PyPi package https://github.com/cucumber/messages/pull/165
- "cuke-messages==0.1.0",
- "cucumber-expressions",
- "decopatch",
- "docopt-ng",
- "filelock",
- "gherkin-official>=24",
- "importlib-metadata;python_version<'3.10.0'",
- "importlib-resources",
- "makefun",
- "Mako",
- "ordered_set",
- "packaging",
- "parse",
- "parse_type>=0.6.0",
- "pathvalidate",
- "py",
- "pycmarkgfm",
- "pydantic>=2.0.3",
- "pytest>=5.2",
- "setuptools>=58",
- "cucumber-tag-expressions",
- "typing-extensions;python_version<'3.11.0'"
-]
-description = "BDD for pytest"
-license = {file = "LICENSE.rst"}
-maintainers = [
- {name = "Konstantin Goloveshko", email = "kostya.goloveshko@gmail.com"}
-]
-name = "pytest-bdd-ng"
-readme = {file = "README.rst", content-type = "text/x-rst"}
-requires-python = ">=3.9"
-version = "2.2.0"
-
-[project.entry-points]
-pytest11 = {"pytest-bdd" = "pytest_bdd.plugin"}
-
-[project.optional-dependencies]
-allure = [
- "allure-python-commons",
- "allure-pytest"
-]
-full = [
- "pytest-bdd-ng[allure]",
- "pytest-bdd-ng[struct-bdd]",
- "pytest-bdd-ng[test]",
- "pytest-bdd-ng[testtypes]"
-]
-struct-bdd = [
- "hjson",
- "json5",
- "pyhocon",
- 'tomli;python_version<"3.11.0"',
- "PyYAML",
- "types-PyYAML"
-]
-test = [
- "allure-python-commons-test",
- "execnet",
- "packaging",
- "PyHamcrest",
- "pytest-httpserver",
- "python-coveralls"
-]
-testenv = [
- "tox~=4.0",
- "codecov"
-]
-testtypes = [
- "mypy",
- "types-certifi",
- "types-docopt",
- "types-setuptools"
-]
-
-[project.scripts]
-bdd_tree_to_rst = "pytest_bdd.script.bdd_tree_to_rst:main"
-
-[project.urls]
-Documentation = "https://pytest-bdd-ng.readthedocs.io/en/default/"
-Repository = "https://github.com/elchupanebrej/pytest-bdd-ng"
-
-[tool.black]
-line-length = 120
-target-version = ["py39", "py310", "py311", "py312", "py313"]
-verbose = true
-
-[tool.isort]
-line_length = 120
-multi_line_output = 3
-profile = "black"
-
-[tool.mypy]
-files = "src/pytest_bdd/**/*.py"
-install_types = true
-non_interactive = true
-plugins = [
- "pydantic.mypy"
-]
-show_error_codes = true
-warn_return_any = true
-warn_unused_configs = true
-
-[[tool.mypy.overrides]]
-ignore_missing_imports = true
-module = [
- "allure_commons.*",
- "allure_pytest.*",
- "attr.*",
- "ci_environment.*",
- "cucumber_expressions.*",
- "cucumber_tag_expressions.*",
- "decopatch.*",
- "execnet.*",
- "gherkin.*",
- "hjson",
- "json5",
- "makefun",
- "mako.*",
- "ordered_set",
- "parse",
- "parse_type.*",
- "pluggy.*",
- "_pytest.*",
- "pyhocon",
- "pytest.*",
- "yaml.*"
-]
+[build-system]
+build-backend = "setuptools.build_meta"
+requires = ["setuptools<74", "wheel", "cffi"]
+
+[project]
+authors = [
+ {name = "Oleg Pidsadnyi", email = "oleg.pidsadnyi@gmail.com"},
+ {name = "Anatoly Bubenkov", email = "bubenkoff@gmail.com"},
+ {name = "Adam Coddington", email = "me@adamcoddington.net"},
+ {name = "Albert-Jan Nijburg", email = "albertjan@curit.com"},
+ {name = "Alessio Bogon", email = "youtux@gmail.com"},
+ {name = "Andrey Makhnach", email = "andrey.makhnach@gmail.com"},
+ {name = "Aron Curzon", email = "curzona@gmail.com"},
+ {name = "Dmitrijs Milajevs", email = "dimazest@gmail.com"},
+ {name = "Dmitry Kolyagin", email = "pauk-slon@users.noreply.github.com"},
+ {name = "Florian Bruhin", email = "me@the-compiler.org"},
+ {name = "Floris Bruynooghe", email = "flub@devork.be"},
+ {name = "Harro van der Klauw", email = "hvdklauw@gmail.com"},
+ {name = "Hugo van Kemenade", email = "hugovk@users.noreply.github.com"},
+ {name = "Konstantin Goloveshko", email = "kostya.goloveshko@gmail.com"},
+ {name = "Laurence Rowe", email = "l@lrowe.co.uk"},
+ {name = "Leonardo Santagada", email = "santagada@github.com"},
+ {name = "Milosz Sliwinski", email = "sliwinski.milosz@gmail.com"},
+ {name = "Michiel Holtkamp", email = "github@elfstone.nl"},
+ {name = "Robin Pedersen", email = "ropez@github.com"},
+ {name = "Sergey Kraynev", email = "sergejyit@gmail.com"}
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Framework :: Pytest",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: POSIX",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: MacOS :: MacOS X",
+ "Topic :: Software Development :: Testing",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Utilities",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13"
+]
+dependencies = [
+ "aiohttp",
+ "attrs",
+ "certifi",
+ "cffi",
+ "chevron",
+ "ci-environment",
+ # This is needed until messages become regular PyPi package https://github.com/cucumber/messages/pull/165
+ "cuke-messages==0.1.0",
+ "cucumber-expressions",
+ "decopatch",
+ "docopt-ng",
+ "filelock",
+ "gherkin-official>=24,<30",
+ "importlib-metadata;python_version<'3.10.0'",
+ "importlib-resources",
+ "makefun",
+ "Mako",
+ "ordered_set",
+ "packaging",
+ "parse",
+ "parse_type>=0.6.0",
+ "pathvalidate",
+ "py",
+ "pycmarkgfm",
+ "pydantic>=2.0.3",
+ "pytest>=5.2",
+ "setuptools<74",
+ "cucumber-tag-expressions",
+ "typing-extensions;python_version<'3.11.0'"
+]
+description = "BDD for pytest"
+license = {file = "LICENSE.rst"}
+maintainers = [
+ {name = "Konstantin Goloveshko", email = "kostya.goloveshko@gmail.com"}
+]
+name = "pytest-bdd-ng"
+readme = {file = "README.rst", content-type = "text/x-rst"}
+requires-python = ">=3.9"
+version = "2.2.0"
+
+[project.entry-points]
+pytest11 = {"pytest-bdd" = "pytest_bdd.plugin"}
+
+[project.optional-dependencies]
+allure = [
+ "allure-python-commons",
+ "allure-pytest"
+]
+doc-gen = [
+ 'docopt-ng',
+ 'pandoc',
+ 'panflute',
+ 'pycmarkgfm',
+ 'pypandoc'
+]
+full = [
+ "pytest-bdd-ng[allure]",
+ "pytest-bdd-ng[doc-gen]",
+ "pytest-bdd-ng[struct-bdd]",
+ "pytest-bdd-ng[test]",
+ "pytest-bdd-ng[testtypes]"
+]
+struct-bdd = [
+ "hjson",
+ "json5",
+ "pyhocon",
+ 'tomli;python_version<"3.11.0"',
+ "PyYAML",
+ "types-PyYAML"
+]
+test = [
+ "allure-python-commons-test",
+ "execnet",
+ "packaging",
+ "PyHamcrest",
+ "pytest-httpserver",
+ "python-coveralls",
+ "pytest-bdd-ng[doc-gen];python_version>='3.12.0'"
+]
+testenv = [
+ "tox~=4.0",
+ "codecov"
+]
+testtypes = [
+ "mypy",
+ "types-certifi",
+ "types-docopt",
+ "types-setuptools"
+]
+
+[project.scripts]
+bdd_tree_to_rst = "pytest_bdd.script.bdd_tree_to_rst:main"
+
+[project.urls]
+Documentation = "https://pytest-bdd-ng.readthedocs.io/en/default/"
+Repository = "https://github.com/elchupanebrej/pytest-bdd-ng"
+
+[tool.black]
+line-length = 120
+target-version = ["py39", "py310", "py311", "py312", "py313"]
+verbose = true
+
+[tool.isort]
+line_length = 120
+multi_line_output = 3
+profile = "black"
+
+[tool.mypy]
+files = "src/pytest_bdd/**/*.py"
+install_types = true
+non_interactive = true
+plugins = [
+ "pydantic.mypy"
+]
+show_error_codes = true
+warn_return_any = true
+warn_unused_configs = true
+
+[[tool.mypy.overrides]]
+ignore_missing_imports = true
+module = [
+ "allure_commons.*",
+ "allure_pytest.*",
+ "attr.*",
+ "ci_environment.*",
+ "cucumber_expressions.*",
+ "cucumber_tag_expressions.*",
+ "decopatch.*",
+ "execnet.*",
+ "gherkin.*",
+ "hjson",
+ "json5",
+ "makefun",
+ "mako.*",
+ "ordered_set",
+ "parse",
+ "parse_type.*",
+ "pluggy.*",
+ "_pytest.*",
+ "pyhocon",
+ "pytest.*",
+ "yaml.*"
+]
diff --git a/src/pytest_bdd/plugin.py b/src/pytest_bdd/plugin.py
index c4001f54..bc845e3c 100644
--- a/src/pytest_bdd/plugin.py
+++ b/src/pytest_bdd/plugin.py
@@ -6,6 +6,7 @@
from itertools import chain, starmap
from operator import attrgetter, contains, methodcaller
from pathlib import Path
+from subprocess import CalledProcessError
from types import ModuleType
from typing import Any, Deque, Optional, Union
from unittest.mock import patch
@@ -49,6 +50,21 @@
if STRUCT_BDD_INSTALLED:
from pytest_bdd.struct_bdd.plugin import StructBDDPlugin
+try:
+ is_npm_gherkin_installed = all(
+ [
+ check_npm(),
+ any(
+ [
+ check_npm_package("@cucumber/gherkin", global_install=True),
+ check_npm_package("@cucumber/gherkin"),
+ ]
+ ),
+ ]
+ )
+except CalledProcessError:
+ is_npm_gherkin_installed = False
+
def pytest_addhooks(pluginmanager: PytestPluginManager) -> None:
"""Register plugin hooks."""
@@ -348,17 +364,7 @@ def pytest_bdd_get_mimetype(config: Config, path: Path):
# TODO use mimetypes module
if str(path).endswith(".gherkin") or str(path).endswith(".feature"):
return Mimetype.gherkin_plain.value
- elif str(path).endswith(".gherkin.md") or str(path).endswith(".feature.md"):
- if not check_npm():
- return
-
- if not any(
- [
- check_npm_package("@cucumber/gherkin", global_install=True),
- check_npm_package("@cucumber/gherkin"),
- ]
- ):
- return
+ elif (str(path).endswith(".gherkin.md") or str(path).endswith(".feature.md")) and is_npm_gherkin_installed:
return Mimetype.markdown.value
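
Aside on the hunk above: the npm/@cucumber/gherkin probe now runs once at import time, and any failing npm call is treated as "not installed". A rough stand-alone sketch of that pattern; the real check_npm/check_npm_package helpers ship with pytest-bdd-ng, so the direct npm invocations below are assumptions.

```python
# Illustrative sketch only (pytest-bdd-ng's own check_npm / check_npm_package
# helpers may differ): decide once, at import time, whether Markdown feature
# files can be handled, i.e. npm plus a global or local @cucumber/gherkin.
import subprocess
from subprocess import CalledProcessError


def _npm(*args: str) -> None:
    # Raises CalledProcessError on a non-zero exit; the guards below absorb it.
    subprocess.run(["npm", *args], check=True, capture_output=True)


try:
    _npm("--version")                                # is npm usable at all?
    try:
        _npm("ls", "--global", "@cucumber/gherkin")  # installed globally?
    except CalledProcessError:
        _npm("ls", "@cucumber/gherkin")              # ...or locally?
    IS_NPM_GHERKIN_INSTALLED = True
except (CalledProcessError, FileNotFoundError):
    # Missing npm or missing package: Markdown features simply stay unsupported.
    IS_NPM_GHERKIN_INSTALLED = False
```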
diff --git a/src/pytest_bdd/script/bdd_tree_to_rst.py b/src/pytest_bdd/script/bdd_tree_to_rst.py
index 39bc2088..5c518cee 100644
--- a/src/pytest_bdd/script/bdd_tree_to_rst.py
+++ b/src/pytest_bdd/script/bdd_tree_to_rst.py
@@ -1,14 +1,17 @@
"""Converts directory tree containing Gherkin files into a tree which would be included into rst files
Usage:
-    bdd_tree_to_rst.py <features_dir> <output_dir>
+    bdd_tree_to_rst.py <features_dir> <output_dir> [--snapshot=<snapshot_dir>]
+Options:
+    --snapshot=<snapshot_dir>  Path to save snapshot on found diff between old and new documentation
"""
+import io
import sys
from collections import deque
from filecmp import dircmp
-from functools import reduce
+from functools import partial, reduce
from itertools import chain
from operator import methodcaller, truediv
from os.path import commonpath
@@ -17,22 +20,35 @@
from tempfile import TemporaryDirectory
from textwrap import dedent
-import pycmarkgfm
+import panflute as pf # type: ignore[import-not-found]
+import pycmarkgfm # type: ignore[import-untyped]
+import pypandoc # type: ignore[import-not-found]
from docopt import docopt
SECTION_SYMBOLS = "-#!\"$%&'()*+,./:;<=>?@[\\]^_`{|}~="
-def same_folders(dcmp):
- if any([dcmp.diff_files, dcmp.left_only, dcmp.right_only]):
- return False
- return all(map(same_folders, dcmp.subdirs.values()))
+def diff_folders(dcmp):
+ if any(diff := [dcmp.diff_files, dcmp.left_only, dcmp.right_only]):
+ dcmp.report()
+ return diff
+ if any(diff := list(map(diff_folders, dcmp.subdirs.values()))):
+ return diff
+ pass
+
+
+def adjust_heading_level(elem, doc, *, level):
+ if isinstance(elem, pf.Header):
+ new_level = elem.level + level
+ return pf.Header(*elem.content, level=new_level)
+ return elem
def convert(features_path: Path, output_path: Path, temp_path: Path):
base_output_common_path = Path(commonpath([str(features_path), str(output_path)]))
features_path_rel_to_common_path = features_path.relative_to(base_output_common_path)
output_path_rel_to_common_path = output_path.parent.relative_to(base_output_common_path)
+ # TODO move side effect from this method
index_file = temp_path / "features.rst"
output_path_rel_to_features_path = (
@@ -66,27 +82,56 @@ def convert(features_path: Path, output_path: Path, temp_path: Path):
"""
)
- gherkin_file_paths = chain(processable_path.glob("*.gherkin.md"), processable_path.glob("*.feature.md"))
+ gherkin_file_paths = chain(processable_path.glob("*.gherkin"), processable_path.glob("*.feature"))
+ markdown_gherkin_file_paths = chain(
+ processable_path.glob("*.gherkin.md"), processable_path.glob("*.feature.md")
+ )
struct_bdd_file_paths = processable_path.glob("*.bdd.yaml")
sub_processable_paths = list(filter(methodcaller("is_dir"), processable_path.iterdir()))
- for path in gherkin_file_paths:
+ for path in markdown_gherkin_file_paths:
rel_path = path.relative_to(features_path)
+ offset = len(rel_path.parts)
+
+ abs_path = temp_path / rel_path
+ abs_path.parent.mkdir(exist_ok=True, parents=True)
+
+ html_data = pycmarkgfm.gfm_to_html((features_path / rel_path).read_text())
+
+ html_content = pypandoc.convert_text(html_data, "json", format="html")
+ doc = pf.load(io.StringIO(html_content))
+
+ with io.StringIO() as f:
+ pf.dump(pf.run_filter(partial(adjust_heading_level, level=offset + 1), doc=doc), f)
+
+ contents = f.getvalue()
+ output_rst = pypandoc.convert_text(contents, "rst", format="json")
+ abs_path.with_suffix(".rst").write_text(output_rst, encoding="utf-8")
+
+ stemmed_path = Path(rel_path.stem).stem
+
+ content += dedent(
+ # language=rst
+ f"""\
+ {stemmed_path}
+ {SECTION_SYMBOLS[offset-1]*len(stemmed_path)}
- (temp_path / rel_path).parent.mkdir(exist_ok=True, parents=True)
- (temp_path / rel_path).with_suffix(".html").write_text(
- pycmarkgfm.gfm_to_html((features_path / rel_path).read_text())
+ .. include:: {(Path('features')/ path.relative_to(features_path)).with_suffix('.rst').as_posix()}
+
+ """
)
+ for path in gherkin_file_paths:
+ rel_path = path.relative_to(features_path)
content += dedent(
# language=rst
f"""\
{rel_path.stem}
- {SECTION_SYMBOLS[len(rel_path.parts)-1]*len(rel_path.stem)}
+ {SECTION_SYMBOLS[len(rel_path.parts) - 1] * len(rel_path.stem)}
- .. raw:: html
- :file: {(output_path_rel_to_features_path / path.relative_to(features_path)).with_suffix('.html').as_posix()}
+ .. include:: {(output_path_rel_to_features_path / path.relative_to(features_path)).as_posix()}
+ :code: gherkin
"""
)
@@ -116,16 +161,22 @@ def main(): # pragma: no cover
if not features_dir.exists() or not features_dir.is_dir():
raise ValueError(f"Wrong input features directory {features_dir} is provided")
output_dir = Path(arguments["<output_dir>"]).resolve()
+ output_dir.mkdir(parents=True, exist_ok=True)
+ snapshot_dir = Path(p) if (p := arguments.get("--snapshot")) else None
with TemporaryDirectory() as temp_dirname:
temp_dir = Path(temp_dirname)
convert(features_dir, output_dir, temp_dir)
- if not same_folders(dircmp(str(output_dir), temp_dir)):
+ if diff := diff_folders(dircmp(str(output_dir), temp_dir)):
+ if snapshot_dir is not None:
+ rmtree(snapshot_dir, ignore_errors=True)
+ copytree(output_dir, str(snapshot_dir), dirs_exist_ok=True)
+
rmtree(output_dir, ignore_errors=True)
- output_dir.mkdir(parents=True)
copytree(temp_dirname, str(output_dir), dirs_exist_ok=True)
- sys.exit("Documentation is generated and overwritten")
+
+ sys.exit(f"Documentation is generated and overwritten; Diff:{diff}")
if __name__ == "__main__": # pragma: no cover
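
For orientation, the Markdown branch added to convert() above reduces to the following pipeline: GFM to HTML via pycmarkgfm, HTML to pandoc JSON via pypandoc, heading levels shifted with a panflute filter, then JSON to RST. The sketch uses the same library calls as the hunk; the wrapper function name and fixed heading offset are illustrative, and a pandoc binary is assumed to be on PATH.

```python
# Condensed, illustrative sketch of the new Markdown -> RST path (not the exact
# script): GFM -> HTML -> pandoc JSON -> heading-level shift -> RST.
import io
from functools import partial

import panflute as pf
import pycmarkgfm
import pypandoc


def shift_headings(elem, doc, *, level):
    # panflute filter: demote every header so the generated snippet nests
    # under the headings of the document that will `.. include::` it.
    if isinstance(elem, pf.Header):
        return pf.Header(*elem.content, level=elem.level + level)
    return elem


def markdown_feature_to_rst(markdown_text: str, offset: int = 2) -> str:
    html = pycmarkgfm.gfm_to_html(markdown_text)
    doc = pf.load(io.StringIO(pypandoc.convert_text(html, "json", format="html")))
    doc = pf.run_filter(partial(shift_headings, level=offset), doc=doc)
    with io.StringIO() as buffer:
        pf.dump(doc, buffer)
        return pypandoc.convert_text(buffer.getvalue(), "rst", format="json")


# e.g. markdown_feature_to_rst("# Feature: Demo\n\nSome description\n")
```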
diff --git a/src/pytest_bdd/struct_bdd/model.py b/src/pytest_bdd/struct_bdd/model.py
index 10254866..4248ea6b 100644
--- a/src/pytest_bdd/struct_bdd/model.py
+++ b/src/pytest_bdd/struct_bdd/model.py
@@ -1,4 +1,4 @@
-from collections import defaultdict, namedtuple
+from collections import defaultdict
from collections.abc import Mapping, Sequence
from enum import Enum
from functools import partial
@@ -6,7 +6,7 @@
from itertools import chain, product, starmap
from operator import attrgetter, eq, is_not
from pathlib import Path
-from typing import Annotated, Any, ClassVar, List, Literal, NamedTuple, Optional, Type, Union
+from typing import Annotated, Any, Callable, Literal, NamedTuple, Optional, Union, cast
from attr import attrib, attrs
from pydantic import ( # type:ignore[attr-defined] # migration to pydantic 2
@@ -62,16 +62,16 @@ class Node(BaseModel):
populate_by_name=True,
)
- tags: Optional[Sequence[str]] = Field(default_factory=list, alias="Tags")
+ tags: Optional[Sequence[str]] = Field(default_factory=cast(Callable, list), alias="Tags")
name: Optional[str] = Field(None, alias="Name")
description: Optional[str] = Field(None, alias="Description")
- comments: Optional[Sequence[str]] = Field(default_factory=list, alias="Comments")
+ comments: Optional[Sequence[str]] = Field(default_factory=cast(Callable, list), alias="Comments")
class Table(Node):
type: Optional[Literal["Rowed", "Columned"]] = Field("Rowed", alias="Type")
- parameters: Optional[Sequence[str]] = Field(default_factory=list, alias="Parameters")
- values: Optional[Sequence[Sequence[Any]]] = Field(default_factory=list, alias="Values")
+ parameters: Optional[Sequence[str]] = Field(default_factory=cast(Callable, list), alias="Parameters")
+ values: Optional[Sequence[Sequence[Any]]] = Field(default_factory=cast(Callable, list), alias="Values")
@property
def columned_values(self):
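
The cast(Callable, list) edits above are purely a typing workaround. A minimal stand-alone illustration, assuming the goal is to keep list as the runtime default_factory while satisfying strict mypy/pydantic-plugin checks:

```python
# Minimal illustration of the default_factory pattern used above: `list` stays
# the runtime factory (a fresh empty list per instance); the cast only relaxes
# its declared type so strict checking accepts it for an Optional[Sequence[str]].
from collections.abc import Sequence
from typing import Callable, Optional, cast

from pydantic import BaseModel, Field


class Node(BaseModel):
    tags: Optional[Sequence[str]] = Field(default_factory=cast(Callable, list), alias="Tags")


node = Node()
assert node.tags == []                                  # fresh list, not a shared default
assert node.model_dump(by_alias=True) == {"Tags": []}   # alias is still honoured
```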
diff --git a/tests/doc/test_doc.py b/tests/doc/test_doc.py
index 3af3f176..02b05954 100644
--- a/tests/doc/test_doc.py
+++ b/tests/doc/test_doc.py
@@ -1,14 +1,19 @@
+import sys
from pathlib import Path
+from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import TYPE_CHECKING
-from pytest_bdd.script.bdd_tree_to_rst import convert
+from pytest import mark
if TYPE_CHECKING: # pragma: no cover
from pytest_bdd.compatibility.pytest import Testdir
+@mark.skipif(sys.version_info < (3, 12), reason="Verify only on the latest version")
def test_doc_generation(testdir: "Testdir"):
+ from pytest_bdd.script.bdd_tree_to_rst import convert
+
features_path = Path(testdir.tmpdir) / "features"
features_path.mkdir()
(features_path / "simple.gherkin").write_text(
@@ -17,6 +22,12 @@ def test_doc_generation(testdir: "Testdir"):
Feature: Do nothing
"""
)
+ (features_path / "simple_markdown.gherkin.md").write_text(
+ # language=gherkin
+ """
+ # Feature: Simple gherkin markdown
+ """
+ )
(features_path / "extra").mkdir()
(features_path / "extra" / "other_simple.gherkin").write_text(
# language=gherkin
@@ -28,32 +39,40 @@ def test_doc_generation(testdir: "Testdir"):
output_path = Path(testdir.tmpdir) / "output"
output_path.mkdir()
- output = convert(features_path.resolve(), output_path.resolve())
- assert output == dedent(
- # language=rst
- """\
- Features
- ========
+ with TemporaryDirectory() as temp_dirname:
+ temp_path = Path(temp_dirname)
+ convert(features_path.resolve(), output_path.resolve(), temp_path)
- .. NOTE:: Features below are part of end-to-end test suite; You always could find most specific
- use cases of **pytest-bdd-ng** by investigation of its regression
- test suite https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/tests
+ assert (temp_path / "features.rst").read_text() == dedent(
+ # language=rst
+ """\
+ Features
+ ========
+ .. NOTE:: Features below are part of end-to-end test suite; You always could find most specific
+ use cases of **pytest-bdd-ng** by investigation of its regression
+ test suite https://github.com/elchupanebrej/pytest-bdd-ng/tree/default/tests
- simple
- ------
- .. include:: features/simple.gherkin
- :code: gherkin
+ simple_markdown
+ ---------------
- extra
- -----
+ .. include:: features/simple_markdown.gherkin.rst
- other_simple
- ############
+ simple
+ ------
- .. include:: features/extra/other_simple.gherkin
- :code: gherkin
- """
- )
+ .. include:: features/simple.gherkin
+ :code: gherkin
+
+ extra
+ -----
+
+ other_simple
+ ############
+
+ .. include:: features/extra/other_simple.gherkin
+ :code: gherkin
+ """
+ )
diff --git a/tests/feature/test_markdown.py b/tests/feature/test_markdown.py
index 406bcc9f..7d643ad6 100644
--- a/tests/feature/test_markdown.py
+++ b/tests/feature/test_markdown.py
@@ -3,7 +3,6 @@
import pytest
-@pytest.mark.xfail(reason="https://github.com/cucumber/gherkin/pull/64")
def test_markdown(testdir):
testdir.makefile(
".feature.md",
diff --git a/tests/feature/test_no_scenario.py b/tests/feature/test_no_scenario.py
index 8a0b74f2..72573a5c 100644
--- a/tests/feature/test_no_scenario.py
+++ b/tests/feature/test_no_scenario.py
@@ -22,4 +22,4 @@ def test_no_scenarios(testdir):
"""
)
result = testdir.runpytest()
- result.stdout.fnmatch_lines(["*FeatureError*"])
+ result.stdout.fnmatch_lines(["*FeatureConcreteParseError*"])
diff --git a/tests/feature/test_outline.py b/tests/feature/test_outline.py
index 99573074..3b6640e5 100644
--- a/tests/feature/test_outline.py
+++ b/tests/feature/test_outline.py
@@ -165,7 +165,7 @@ def test_wrongly_outlined_missing_parameter_scenario(testdir):
result = testdir.runpytest()
assert_outcomes(result, errors=1)
- result.stdout.fnmatch_lines("*FeatureError*")
+ result.stdout.fnmatch_lines("*FeatureConcreteParseError*")
def test_outlined_with_other_fixtures(testdir, tmp_path):
diff --git a/tests/feature/test_tags.py b/tests/feature/test_tags.py
index af23dea3..cbd92000 100644
--- a/tests/feature/test_tags.py
+++ b/tests/feature/test_tags.py
@@ -160,4 +160,4 @@ def test_invalid_tags(testdir):
ensure=True,
)
result = testdir.runpytest()
- result.stdout.fnmatch_lines(["*FeatureError*"])
+ result.stdout.fnmatch_lines(["*FeatureConcreteParseError*"])
diff --git a/tests/feature/test_wrong.py b/tests/feature/test_wrong.py
index ca7762b0..6da8759b 100644
--- a/tests/feature/test_wrong.py
+++ b/tests/feature/test_wrong.py
@@ -40,4 +40,4 @@ def test_multiple_features_single_file(testdir):
)
result = testdir.runpytest()
assert_outcomes(result, errors=1)
- result.stdout.fnmatch_lines("*FeatureError: *")
+ result.stdout.fnmatch_lines("*FeatureConcreteParseError: *")