diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 52dc486..9dcdece 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -26,7 +26,9 @@ jobs:
         pip install -U pip setuptools wheel
         pip install -r dev-requirements.txt
     - name: Run tests
-      run: pytest
+      run: pytest --ignore-glob="*.shouldfail.yml"
+    - name: Run tests with expected failures
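+      # the two *.shouldfail.yml suites define 5 cases in total, all of which are expected to fail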
+      run: pytest pytest_mypy_plugins/tests/*.shouldfail.yml 2>&1 | grep "5 failed"
 
   lint:
     runs-on: ubuntu-latest
diff --git a/.gitignore b/.gitignore
index 5ba8ffa..b7258a6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,5 @@
 __pycache__
 dist/
 build/
+.pytest_cache/
+venv/
diff --git a/README.md b/README.md
index 598e44e..a370c12 100644
--- a/README.md
+++ b/README.md
@@ -52,16 +52,17 @@ You can also specify `PYTHONPATH`, `MYPYPATH`, or any other environment variable
 In general each test case is just an element in an array written in a properly formatted `YAML` file.
-On top of that, each case must comply to following types:
+On top of that, each case must comply with the following schema:
 
-| Property        | Type                                                   | Description                                                                                                     |
-| --------------- | ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------- |
-| `case`          | `str`                                                  | Name of the test case, complies to `[a-zA-Z0-9]` pattern                                                        |
-| `main`          | `str`                                                  | Portion of the code as if written in `.py` file                                                                 |
-| `files`         | `Optional[List[File]]=[]`\*                            | List of extra files to simulate imports if needed                                                               |
-| `disable_cache` | `Optional[bool]=False`                                 | Set to `true` disables `mypy` caching                                                                           |
-| `mypy_config`   | `Optional[Dict[str, Union[str, int, bool, float]]]={}` | Inline `mypy` configuration, passed directly to `mypy` as `--config-file` option                                |
-| `env`           | `Optional[Dict[str, str]]={}`                          | Environmental variables to be provided inside of test run                                                       |
-| `parametrized`  | `Optional[List[Parameter]]=[]`\*                       | List of parameters, similar to [`@pytest.mark.parametrize`](https://docs.pytest.org/en/stable/parametrize.html) |
-| `skip`          | `str`                                                  | Expression evaluated with following globals set: `sys`, `os`, `pytest` and `platform`                           |
+| Property        | Type                                                   | Description                                                                                                         |
+| --------------- | ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------- |
+| `case`          | `str`                                                  | Name of the test case, complies to `[a-zA-Z0-9]` pattern                                                            |
+| `main`          | `str`                                                  | Portion of the code as if written in `.py` file                                                                     |
+| `files`         | `Optional[List[File]]=[]`\*                            | List of extra files to simulate imports if needed                                                                   |
+| `disable_cache` | `Optional[bool]=False`                                 | Set to `true` disables `mypy` caching                                                                               |
+| `mypy_config`   | `Optional[Dict[str, Union[str, int, bool, float]]]={}` | Inline `mypy` configuration, passed directly to `mypy` as `--config-file` option                                    |
+| `env`           | `Optional[Dict[str, str]]={}`                          | Environmental variables to be provided inside of test run                                                           |
+| `parametrized`  | `Optional[List[Parameter]]=[]`\*                       | List of parameters, similar to [`@pytest.mark.parametrize`](https://docs.pytest.org/en/stable/parametrize.html)     |
+| `skip`          | `str`                                                  | Expression evaluated with following globals set: `sys`, `os`, `pytest` and `platform`                               |
+| `regex`         | `Optional[bool]=False`                                 | Use regular expressions in expected messages (comments and `out`) when matching against the actual output. Defaults to `false`, i.e. the full text must match |
 
 (*) Appendix to **pseudo** types used above:
 
@@ -126,6 +127,27 @@ Implementation notes:
     main:1: note: Revealed type is 'builtins.str'
 ```
 
+#### 4. Regular expressions in expectations
+
+```yaml
+- case: expected_message_regex_with_out
+  regex: yes
+  main: |
+    a = 'abc'
+    reveal_type(a)
+  out: |
+    main:2: note: .*str.*
+```
+
+#### 5. Regular expressions for specific lines of output
+
+Append `R` to the severity marker (e.g. `# NR:` instead of `# N:`) to match that single comment as a regular expression, even when `regex` is not enabled for the case:
+
+```yaml
+- case: expected_single_message_regex
+  main: |
+    a = 'hello'
+    reveal_type(a)  # NR: .*str.*
+```
+
 ## Options
 
 ```
diff --git a/pytest_mypy_plugins/collect.py b/pytest_mypy_plugins/collect.py
index a527f71..7d33518 100644
--- a/pytest_mypy_plugins/collect.py
+++ b/pytest_mypy_plugins/collect.py
@@ -101,16 +101,21 @@ def collect(self) -> Iterator["YamlTestItem"]:
                 test_name = f"{test_name_prefix}{test_name_suffix}"
                 main_file = File(path="main.py", content=pystache.render(raw_test["main"], params))
                 test_files = [main_file] + parse_test_files(raw_test.get("files", []))
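+                # Per-case "regex" flag: when true, expected messages from comments and "out" are treated as regular expressions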
+                regex = raw_test.get("regex", False)
 
-                output_from_comments = []
+                expected_output = []
                 for test_file in test_files:
-                    output_lines = utils.extract_errors_from_comments(test_file.path, test_file.content.split("\n"))
-                    output_from_comments.extend(output_lines)
+                    output_lines = utils.extract_output_matchers_from_comments(
+                        test_file.path, test_file.content.split("\n"), regex=regex
+                    )
+                    expected_output.extend(output_lines)
 
                 starting_lineno = raw_test["__line__"]
                 extra_environment_variables = parse_environment_variables(raw_test.get("env", []))
                 disable_cache = raw_test.get("disable_cache", False)
-                expected_output_lines = pystache.render(raw_test.get("out", ""), params).split("\n")
+                expected_output.extend(
+                    utils.extract_output_matchers_from_out(raw_test.get("out", ""), params, regex=regex)
+                )
                 additional_mypy_config = raw_test.get("mypy_config", "")
 
                 skip = self._eval_skip(str(raw_test.get("skip", "False")))
@@ -122,7 +127,7 @@ def collect(self) -> Iterator["YamlTestItem"]:
                         starting_lineno=starting_lineno,
                         environment_variables=extra_environment_variables,
                         disable_cache=disable_cache,
-                        expected_output_lines=output_from_comments + expected_output_lines,
+                        expected_output=expected_output,
                         parsed_test_data=raw_test,
                         mypy_config=additional_mypy_config,
                     )
diff --git a/pytest_mypy_plugins/item.py b/pytest_mypy_plugins/item.py
index 775f731..c2d89fd 100644
--- a/pytest_mypy_plugins/item.py
+++ b/pytest_mypy_plugins/item.py
@@ -32,8 +32,9 @@
 from pytest_mypy_plugins import utils
 from pytest_mypy_plugins.collect import File, YamlTestFile
 from pytest_mypy_plugins.utils import (
+    OutputMatcher,
     TypecheckAssertionError,
-    assert_string_arrays_equal,
+    assert_expected_matched_actual,
     capture_std_streams,
     fname_to_module,
 )
@@ -124,7 +125,7 @@ def __init__(
         *,
         files: List[File],
         starting_lineno: int,
-        expected_output_lines: List[str],
+        expected_output: List[OutputMatcher],
         environment_variables: Dict[str, Any],
         disable_cache: bool,
         mypy_config: str,
@@ -134,7 +135,7 @@ def __init__(
         self.files = files
         self.environment_variables = environment_variables
         self.disable_cache = disable_cache
-        self.expected_output_lines = expected_output_lines
+        self.expected_output = expected_output
         self.starting_lineno = starting_lineno
         self.additional_mypy_config = mypy_config
         self.parsed_test_data = parsed_test_data
@@ -279,7 +280,7 @@ def runtest(self) -> None:
                 for line in mypy_output.splitlines():
                     output_line = replace_fpath_with_module_name(line, rootdir=execution_path)
                     output_lines.append(output_line)
-                assert_string_arrays_equal(expected=self.expected_output_lines, actual=output_lines)
+                assert_expected_matched_actual(expected=self.expected_output, actual=output_lines)
         finally:
             temp_dir.cleanup()
             # remove created modules and all their dependants from cache
diff --git a/pytest_mypy_plugins/tests/test-regex_assertions.shouldfail.yml b/pytest_mypy_plugins/tests/test-regex_assertions.shouldfail.yml
new file mode 100644
index 0000000..7d064ea
--- /dev/null
+++ b/pytest_mypy_plugins/tests/test-regex_assertions.shouldfail.yml
@@ -0,0 +1,16 @@
+- case: regex_but_not_turned_on
+  main: |
+    a = 'hello'
+    reveal_type(a)  # N: .*str.*
+
+- case: regex_but_turned_off
+  regex: no
+  main: |
+    a = 'hello'
+    reveal_type(a)  # N: .*str.*
+
+- case: regex_does_not_match
+  regex: no
+  main: |
+    a = 'hello'
+    reveal_type(a)  # NR: .*banana.*
diff --git a/pytest_mypy_plugins/tests/test-regex_assertions.yml b/pytest_mypy_plugins/tests/test-regex_assertions.yml
new file mode 100644
index 0000000..0cdf230
--- /dev/null
+++ b/pytest_mypy_plugins/tests/test-regex_assertions.yml
@@ -0,0 +1,22 @@
+- case: expected_message_regex
+  regex: yes
+  main: |
+    a = 1
+    b = 'hello'
+
+    reveal_type(a)  # N: Revealed type is "builtins.int"
+    reveal_type(b)  # N: .*str.*
+
+- case: expected_message_regex_with_out
+  regex: yes
+  main: |
+    a = 'abc'
+    reveal_type(a)
+  out: |
+    main:2: note: .*str.*
+
+- case: expected_single_message_regex
+  regex: no
+  main: |
+    a = 'hello'
+    reveal_type(a)  # NR: .*str.*
diff --git a/pytest_mypy_plugins/tests/test-simple-cases.shouldfail.yml b/pytest_mypy_plugins/tests/test-simple-cases.shouldfail.yml
new file mode 100644
index 0000000..032e96b
--- /dev/null
+++ b/pytest_mypy_plugins/tests/test-simple-cases.shouldfail.yml
@@ -0,0 +1,12 @@
+- case: fail_if_message_does_not_match
+  main: |
+    a = 'hello'
+    reveal_type(a)  # N: Some other message
+
+- case: fail_if_message_from_out_does_not_match
+  regex: yes
+  main: |
+    a = 'abc'
+    reveal_type(a)
+  out: |
+    main:2: note: Some other message
\ No newline at end of file
diff --git a/pytest_mypy_plugins/utils.py b/pytest_mypy_plugins/utils.py
index 40ec467..2e87e2c 100644
--- a/pytest_mypy_plugins/utils.py
+++ b/pytest_mypy_plugins/utils.py
@@ -6,9 +6,21 @@
 import os
 import re
 import sys
+from dataclasses import dataclass
 from pathlib import Path
-from typing import Callable, Iterator, List, Optional, Tuple, Union
-
+from typing import (
+    Any,
+    Callable,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Tuple,
+    Union,
+)
+
+import pystache
+import regex
 from decorator import contextmanager
 
 
@@ -55,6 +67,42 @@ def fname_to_module(fpath: Path, root_path: Path) -> Optional[str]:
 MIN_LINE_LENGTH_FOR_ALIGNMENT = 5
 
 
+@dataclass
+class OutputMatcher:
+    fname: str
+    lnum: int
+    severity: str
+    message: str
+    regex: bool
+    col: Optional[str] = None
+
+    def matches(self, actual: str) -> bool:
+        if self.regex:
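+            # Only the message part is a pattern; the "fname:lnum: severity:" prefix is escaped and matched literally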
+            pattern = (
+                regex.escape(
+                    f"{self.fname}:{self.lnum}: {self.severity}: "
+                    if self.col is None
+                    else f"{self.fname}:{self.lnum}:{self.col}: {self.severity}: "
+                )
+                + self.message
+            )
+            return bool(regex.match(pattern, actual))
+        else:
+            return str(self) == actual
+
+    def __str__(self) -> str:
+        if self.col is None:
+            return f"{self.fname}:{self.lnum}: {self.severity}: {self.message}"
+        else:
+            return f"{self.fname}:{self.lnum}:{self.col}: {self.severity}: {self.message}"
+
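+    # __format__ and __len__ let OutputMatcher instances be used directly in the aligned error output below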
+    def __format__(self, format_spec: str) -> str:
+        return format(str(self), format_spec)
+
+    def __len__(self) -> int:
+        return len(str(self))
+
+
 class TypecheckAssertionError(AssertionError):
     def __init__(self, error_message: Optional[str] = None, lineno: int = 0) -> None:
         self.error_message = error_message or ""
@@ -81,16 +129,16 @@ def remove_common_prefix(lines: List[str]) -> List[str]:
     return cleaned_lines
 
 
-def _num_skipped_prefix_lines(a1: List[str], a2: List[str]) -> int:
+def _num_skipped_prefix_lines(a1: List[OutputMatcher], a2: List[str]) -> int:
     num_eq = 0
-    while num_eq < min(len(a1), len(a2)) and a1[num_eq] == a2[num_eq]:
+    while num_eq < min(len(a1), len(a2)) and a1[num_eq].matches(a2[num_eq]):
         num_eq += 1
     return max(0, num_eq - 4)
 
 
-def _num_skipped_suffix_lines(a1: List[str], a2: List[str]) -> int:
+def _num_skipped_suffix_lines(a1: List[OutputMatcher], a2: List[str]) -> int:
     num_eq = 0
-    while num_eq < min(len(a1), len(a2)) and a1[-num_eq - 1] == a2[-num_eq - 1]:
+    while num_eq < min(len(a1), len(a2)) and a1[-num_eq - 1].matches(a2[-num_eq - 1]):
         num_eq += 1
     return max(0, num_eq - 4)
 
@@ -171,18 +219,18 @@ def extract_parts_as_tuple(line: str) -> Tuple[str, int, str]:
     return sorted(lines, key=extract_parts_as_tuple)
 
 
-def assert_string_arrays_equal(expected: List[str], actual: List[str]) -> None:
+def assert_expected_matched_actual(expected: List[OutputMatcher], actual: List[str]) -> None:
     """Assert that two string arrays are equal.
 
     Display any differences in a human-readable form.
     """
-    expected = sorted_by_file_and_line(remove_empty_lines(expected))
+    expected = sorted(expected, key=lambda om: (om.fname, om.lnum))
     actual = sorted_by_file_and_line(remove_empty_lines(actual))
 
     actual = remove_common_prefix(actual)
     error_message = ""
 
-    if expected != actual:
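+    # Each expected matcher must match the actual line at the same position (both lists are sorted by file and line)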
+    if len(expected) != len(actual) or not all(e.matches(a) for e, a in zip(expected, actual)):
         num_skip_start = _num_skipped_prefix_lines(expected, actual)
         num_skip_end = _num_skipped_suffix_lines(expected, actual)
 
@@ -200,13 +248,13 @@ def assert_string_arrays_equal(expected: List[str], actual: List[str]) -> None:
         width = 100
 
         for i in range(num_skip_start, len(expected) - num_skip_end):
-            if i >= len(actual) or expected[i] != actual[i]:
+            if i >= len(actual) or not expected[i].matches(actual[i]):
                 if first_diff < 0:
                     first_diff = i
                 error_message += "  {:<45} (diff)".format(expected[i])
             else:
                 e = expected[i]
-                error_message += "  " + e[:width]
+                error_message += "  " + str(e)[:width]
                 if len(e) > width:
                     error_message += "..."
             error_message += "\n"
@@ -219,7 +267,7 @@ def assert_string_arrays_equal(expected: List[str], actual: List[str]) -> None:
             error_message += "  ...\n"
 
         for j in range(num_skip_start, len(actual) - num_skip_end):
-            if j >= len(expected) or expected[j] != actual[j]:
+            if j >= len(expected) or not expected[j].matches(actual[j]):
                 error_message += "  {:<45} (diff)".format(actual[j])
             else:
                 a = actual[j]
@@ -227,7 +275,7 @@ def assert_string_arrays_equal(expected: List[str], actual: List[str]) -> None:
                 if len(a) > width:
                     error_message += "..."
             error_message += "\n"
-        if actual == []:
+        if not actual:
             error_message += "  (empty)\n"
         if num_skip_end > 0:
             error_message += "  ...\n"
@@ -240,46 +288,84 @@ def assert_string_arrays_equal(expected: List[str], actual: List[str]) -> None:
         ):
             # Display message that helps visualize the differences between two
             # long lines.
-            error_message = _add_aligned_message(expected[first_diff], actual[first_diff], error_message)
+            error_message = _add_aligned_message(str(expected[first_diff]), actual[first_diff], error_message)
 
         if len(expected) == 0:
             raise TypecheckAssertionError(f"Output is not expected: \n{error_message}")
 
         first_failure = expected[first_diff]
         if first_failure:
-            lineno = int(first_failure.split(" ")[0].strip(":").split(":")[1])
-            raise TypecheckAssertionError(error_message=f"Invalid output: \n{error_message}", lineno=lineno)
-
+            raise TypecheckAssertionError(error_message=f"Invalid output: \n{error_message}", lineno=first_failure.lnum)
 
-def build_output_line(fname: str, lnum: int, severity: str, message: str, col: Optional[str] = None) -> str:
-    if col is None:
-        return f"{fname}:{lnum + 1}: {severity}: {message}"
-    else:
-        return f"{fname}:{lnum + 1}:{col}: {severity}: {message}"
 
-
-def extract_errors_from_comments(fname: str, input_lines: List[str]) -> List[str]:
+def extract_output_matchers_from_comments(fname: str, input_lines: List[str], regex: bool) -> List[OutputMatcher]:
     """Transform comments such as '# E: message' or
     '# E:3: message' in input.
 
-    The result is lines like 'fnam:line: error: message'.
+    The result is a list of output matchers.
     """
     fname = fname.replace(".py", "")
-    output_lines = []
-    for lnum, line in enumerate(input_lines):
+    matchers = []
+    for index, line in enumerate(input_lines):
         # The first in the split things isn't a comment
         for possible_err_comment in line.split(" # ")[1:]:
-            m = re.search(r"^([ENW]):((?P<col>\d+):)? (?P<message>.*)$", possible_err_comment.strip())
-            if m:
-                if m.group(1) == "E":
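+            # An optional "R" after the severity letter (e.g. "# NR:") marks this single comment as a regex match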
+            match = re.search(
+                r"^([ENW])(?P<regex>[R])?:((?P<col>\d+):)? (?P<message>.*)$", possible_err_comment.strip()
+            )
+            if match:
+                if match.group(1) == "E":
                     severity = "error"
-                elif m.group(1) == "N":
+                elif match.group(1) == "N":
                     severity = "note"
-                elif m.group(1) == "W":
+                elif match.group(1) == "W":
                     severity = "warning"
-                col = m.group("col")
-                output_lines.append(build_output_line(fname, lnum, severity, message=m.group("message"), col=col))
-    return output_lines
+                else:
+                    severity = match.group(1)
+                col = match.group("col")
+                matchers.append(
+                    OutputMatcher(
+                        fname,
+                        index + 1,
+                        severity,
+                        message=match.group("message"),
+                        regex=regex or bool(match.group("regex")),
+                        col=col,
+                    )
+                )
+    return matchers
+
+
+def extract_output_matchers_from_out(out: str, params: Mapping[str, Any], regex: bool) -> List[OutputMatcher]:
+    """Transform output lines such as 'function:9: E: message'
+
+    The result is a list of output matchers.
+    """
+    matchers = []
+    for line in pystache.render(out, params).split("\n"):
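+        # Expected line format: "fname:lnum: severity: message", with an optional column number after the severity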
+        match = re.search(
+            r"^(?P<fname>.*):(?P<lnum>\d*): (?P<severity>.*):((?P<col>\d+):)? (?P<message>.*)$", line.strip()
+        )
+        if match:
+            if match.group("severity") == "E":
+                severity = "error"
+            elif match.group("severity") == "N":
+                severity = "note"
+            elif match.group("severity") == "W":
+                severity = "warning"
+            else:
+                severity = match.group("severity")
+            col = match.group("col")
+            matchers.append(
+                OutputMatcher(
+                    match.group("fname"),
+                    int(match.group("lnum")),
+                    severity,
+                    message=match.group("message"),
+                    regex=regex,
+                    col=col,
+                )
+            )
+    return matchers
 
 
 def get_func_first_lnum(attr: Callable[..., None]) -> Optional[Tuple[int, List[str]]]: