diff --git a/rest_framework/fields.py b/rest_framework/fields.py
index 89c0a714c6..84cf5ffd94 100644
--- a/rest_framework/fields.py
+++ b/rest_framework/fields.py
@@ -1080,7 +1080,6 @@ def validate_precision(self, value):
             self.fail('max_whole_digits', max_whole_digits=self.max_whole_digits)
 
         return value
-
     def to_representation(self, value):
         coerce_to_string = getattr(self, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING)
@@ -1095,11 +1094,17 @@ def to_representation(self, value):
         quantized = self.quantize(value)
 
+        # Apply optional custom rounding if specified
+        if self.rounding is not None:
+            quant = decimal.Decimal(1).scaleb(-self.decimal_places)
+            quantized = quantized.quantize(quant, rounding=self.rounding)
+
         if self.normalize_output:
             quantized = quantized.normalize()
 
         if not coerce_to_string:
             return quantized
+
         if self.localize:
             return localize_input(quantized)
diff --git a/tests/test_fields.py b/tests/test_fields.py
index d574b07ebc..b0acd2601f 100644
--- a/tests/test_fields.py
+++ b/tests/test_fields.py
@@ -2710,3 +2710,10 @@ def validate(self, obj):
             ),
         ]
     }
+from decimal import Decimal, ROUND_HALF_UP
+from rest_framework.fields import DecimalField
+
+def test_decimalfield_rounding_half_up():
+    field = DecimalField(decimal_places=1, max_digits=4, rounding=ROUND_HALF_UP)
+    result = field.to_representation(Decimal('7.25'))
+    assert result == '7.3'
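For reviewers, a minimal sketch of the behaviour this patch enables. It is illustrative only: it assumes the patched DecimalField from the diff above is importable, and it calls django.conf.settings.configure() as a bare-bones stand-in for a real project so that api_settings can resolve its defaults. ROUND_HALF_UP and ROUND_HALF_EVEN come from the standard-library decimal module; the decimal context default is bankers' rounding (ROUND_HALF_EVEN), so a tie such as 7.25 normally rounds to the even neighbour.

    from decimal import Decimal, ROUND_HALF_UP

    from django.conf import settings

    settings.configure()  # minimal, assumed setup so api_settings can read defaults

    from rest_framework.fields import DecimalField

    # Without `rounding`, the new branch is skipped and quantize() falls back
    # to the decimal context default (ROUND_HALF_EVEN): the tie 7.25 -> '7.2'.
    default_field = DecimalField(max_digits=4, decimal_places=1)
    assert default_field.to_representation(Decimal('7.25')) == '7.2'

    # With rounding=ROUND_HALF_UP, the added block re-quantizes to
    # Decimal('0.1') using the caller's mode, so the tie rounds up: '7.3'.
    half_up_field = DecimalField(max_digits=4, decimal_places=1, rounding=ROUND_HALF_UP)
    assert half_up_field.to_representation(Decimal('7.25')) == '7.3'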
warning::
+
+ Experimental API
+
+ :param exprinfo:
+ A text string helping to determine if we should strip
+ ``AssertionError`` from the output. Defaults to the exception
+ message/``__str__()``.
+ """
+ tup = sys.exc_info()
+ assert tup[0] is not None, "no current exception"
+ assert tup[1] is not None, "no current exception"
+ assert tup[2] is not None, "no current exception"
+ exc_info = (tup[0], tup[1], tup[2])
+ return ExceptionInfo.from_exc_info(exc_info, exprinfo)
+
+ @classmethod
+ def for_later(cls) -> "ExceptionInfo[E]":
+ """Return an unfilled ExceptionInfo."""
+ return cls(None, _ispytest=True)
+
+ def fill_unfilled(self, exc_info: Tuple[Type[E], E, TracebackType]) -> None:
+ """Fill an unfilled ExceptionInfo created with ``for_later()``."""
+ assert self._excinfo is None, "ExceptionInfo was already filled"
+ self._excinfo = exc_info
+
+ @property
+ def type(self) -> Type[E]:
+ """The exception class."""
+ assert (
+ self._excinfo is not None
+ ), ".type can only be used after the context manager exits"
+ return self._excinfo[0]
+
+ @property
+ def value(self) -> E:
+ """The exception value."""
+ assert (
+ self._excinfo is not None
+ ), ".value can only be used after the context manager exits"
+ return self._excinfo[1]
+
+ @property
+ def tb(self) -> TracebackType:
+ """The exception raw traceback."""
+ assert (
+ self._excinfo is not None
+ ), ".tb can only be used after the context manager exits"
+ return self._excinfo[2]
+
+ @property
+ def typename(self) -> str:
+ """The type name of the exception."""
+ assert (
+ self._excinfo is not None
+ ), ".typename can only be used after the context manager exits"
+ return self.type.__name__
+
+ @property
+ def traceback(self) -> Traceback:
+ """The traceback."""
+ if self._traceback is None:
+ self._traceback = Traceback(self.tb)
+ return self._traceback
+
+ @traceback.setter
+ def traceback(self, value: Traceback) -> None:
+ self._traceback = value
+
+ def __repr__(self) -> str:
+ if self._excinfo is None:
+ return "<ExceptionInfo for (unfilled) at %#x>" % (id(self))
+ return "<{} {} tblen={}>".format(
+ self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback)
+ )
+
+ def exconly(self, tryshort: bool = False) -> str:
+ """Return the exception as a string.
+
+ When 'tryshort' resolves to True, and the exception is an
+ AssertionError, only the actual exception part of the exception
+ representation is returned (so 'AssertionError: ' is removed from
+ the beginning).
+ """
+ lines = format_exception_only(self.type, self.value)
+ text = "".join(lines)
+ text = text.rstrip()
+ if tryshort:
+ if text.startswith(self._striptext):
+ text = text[len(self._striptext) :]
+ return text
+
+ def errisinstance(
+ self, exc: Union[Type[BaseException], Tuple[Type[BaseException], ...]]
+ ) -> bool:
+ """Return True if the exception is an instance of exc.
+
+ Consider using ``isinstance(excinfo.value, exc)`` instead.
+ """
+ return isinstance(self.value, exc)
+
+ def _getreprcrash(self) -> Optional["ReprFileLocation"]:
+ # Find last non-hidden traceback entry that led to the exception of the
+ # traceback, or None if all hidden.
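For orientation, a small sketch of the `from_current()` / `exconly()` / `match()` round trip documented above; this uses pytest's internal API on an assumed toy assertion:

```python
# Hedged sketch: the ExceptionInfo accessors shown above, on a toy exception.
from _pytest._code import ExceptionInfo

try:
    assert 1 + 1 == 3, "math drifted"
except AssertionError:
    info = ExceptionInfo.from_current()  # wraps sys.exc_info()
    print(info.typename)                 # "AssertionError"
    print(info.exconly())                # "AssertionError: math drifted"
    info.match(r"math drifted")          # re.search; raises AssertionError on miss
```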
+ for i in range(-1, -len(self.traceback) - 1, -1): + entry = self.traceback[i] + if not entry.ishidden(self): + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + exconly = self.exconly(tryshort=True) + return ReprFileLocation(path, lineno + 1, exconly) + return None + + def getrepr( + self, + showlocals: bool = False, + style: "_TracebackStyle" = "long", + abspath: bool = False, + tbfilter: Union[ + bool, Callable[["ExceptionInfo[BaseException]"], Traceback] + ] = True, + funcargs: bool = False, + truncate_locals: bool = True, + chain: bool = True, + ) -> Union["ReprExceptionInfo", "ExceptionChainRepr"]: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|line|no|native|value traceback style. + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param tbfilter: + A filter for traceback entries. + + * If false, don't hide any entries. + * If true, hide internal entries and entries that contain a local + variable ``__tracebackhide__ = True``. + * If a callable, delegates the filtering to the callable. + + Ignored if ``style`` is ``"native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + reprtraceback=ReprTracebackNative( + traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry if self.traceback else None, + ) + ), + reprcrash=self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def match(self, regexp: Union[str, Pattern[str]]) -> "Literal[True]": + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + value = str(self.value) + msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" + if regexp == value: + msg += "\n Did you mean to `re.escape()` the regex?" + assert re.search(regexp, value), msg + # Return True to allow for "assert excinfo.match()". + return True + + +@dataclasses.dataclass +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: "_TracebackStyle" = "long" + abspath: bool = True + tbfilter: Union[bool, Callable[[ExceptionInfo[BaseException]], Traceback]] = True + funcargs: bool = False + truncate_locals: bool = True + chain: bool = True + astcache: Dict[Union[str, Path], ast.AST] = dataclasses.field( + default_factory=dict, init=False, repr=False + ) + + def _getindent(self, source: "Source") -> int: + # Figure out indent for the given source. 
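The `getrepr()` options above combine roughly as in this sketch (internal API; the exact output shape depends on the chosen style):

```python
# Hedged sketch: rendering an ExceptionInfo with the options documented above.
from _pytest._code import ExceptionInfo

try:
    1 / 0
except ZeroDivisionError:
    info = ExceptionInfo.from_current()
    rep = info.getrepr(style="short", showlocals=True, truncate_locals=True)
    print(str(rep))  # TerminalRepr.__str__ renders through a TerminalWriter
```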
+ try:
+ s = str(source.getstatement(len(source) - 1))
+ except KeyboardInterrupt:
+ raise
+ except BaseException:
+ try:
+ s = str(source[-1])
+ except KeyboardInterrupt:
+ raise
+ except BaseException:
+ return 0
+ return 4 + (len(s) - len(s.lstrip()))
+
+ def _getentrysource(self, entry: TracebackEntry) -> Optional["Source"]:
+ source = entry.getsource(self.astcache)
+ if source is not None:
+ source = source.deindent()
+ return source
+
+ def repr_args(self, entry: TracebackEntry) -> Optional["ReprFuncArgs"]:
+ if self.funcargs:
+ args = []
+ for argname, argvalue in entry.frame.getargs(var=True):
+ args.append((argname, saferepr(argvalue)))
+ return ReprFuncArgs(args)
+ return None
+
+ def get_source(
+ self,
+ source: Optional["Source"],
+ line_index: int = -1,
+ excinfo: Optional[ExceptionInfo[BaseException]] = None,
+ short: bool = False,
+ ) -> List[str]:
+ """Return formatted and marked up source lines."""
+ lines = []
+ if source is not None and line_index < 0:
+ line_index += len(source)
+ if source is None or line_index >= len(source.lines) or line_index < 0:
+ # `line_index` could still be outside `range(len(source.lines))` if
+ # we're processing AST with pathological position attributes.
+ source = Source("???")
+ line_index = 0
+ space_prefix = " "
+ if short:
+ lines.append(space_prefix + source.lines[line_index].strip())
+ else:
+ for line in source.lines[:line_index]:
+ lines.append(space_prefix + line)
+ lines.append(self.flow_marker + " " + source.lines[line_index])
+ for line in source.lines[line_index + 1 :]:
+ lines.append(space_prefix + line)
+ if excinfo is not None:
+ indent = 4 if short else self._getindent(source)
+ lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+ return lines
+
+ def get_exconly(
+ self,
+ excinfo: ExceptionInfo[BaseException],
+ indent: int = 4,
+ markall: bool = False,
+ ) -> List[str]:
+ lines = []
+ indentstr = " " * indent
+ # Get the real exception information out.
+ exlines = excinfo.exconly(tryshort=True).split("\n")
+ failindent = self.fail_marker + indentstr[1:]
+ for line in exlines:
+ lines.append(failindent + line)
+ if not markall:
+ failindent = indentstr
+ return lines
+
+ def repr_locals(self, locals: Mapping[str, object]) -> Optional["ReprLocals"]:
+ if self.showlocals:
+ lines = []
+ keys = [loc for loc in locals if loc[0] != "@"]
+ keys.sort()
+ for name in keys:
+ value = locals[name]
+ if name == "__builtins__":
+ lines.append("__builtins__ = <builtins>")
+ else:
+ # This formatting could all be handled by the
+ # _repr() function, which is only reprlib.Repr in
+ # disguise, so is very configurable.
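The `truncate_locals` branch below switches between the size-limited `saferepr` and the unbounded `safeformat`; roughly:

```python
# Illustrative contrast between the two repr helpers used below.
from _pytest._io.saferepr import safeformat, saferepr

big = list(range(10_000))
print(saferepr(big))         # reprlib-truncated and capped at ~240 chars
print(len(safeformat(big)))  # pprint.pformat: full, possibly multi-line output
```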
+ if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: Optional[TracebackEntry], + excinfo: Optional[ExceptionInfo[BaseException]] = None, + ) -> "ReprEntry": + lines: List[str] = [] + style = ( + entry._repr_style + if entry is not None and entry._repr_style is not None + else self.style + ) + if style in ("short", "long") and entry is not None: + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = "in %s" % (entry.name) + else: + message = excinfo and excinfo.typename or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path: Union[Path, str]) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> "ReprTraceback": + traceback = excinfo.traceback + if callable(self.tbfilter): + traceback = self.tbfilter(excinfo) + elif self.tbfilter: + traceback = traceback.filter(excinfo) + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + if not traceback: + if extraline is None: + extraline = "All traceback entries are hidden. Pass `--full-trace` to see hidden and internal frames." + entries = [self.repr_traceback_entry(None, excinfo)] + return ReprTraceback(entries, extraline, style=self.style) + + last = traceback[-1] + if self.style == "value": + entries = [self.repr_traceback_entry(last, excinfo)] + return ReprTraceback(entries, None, style=self.style) + + entries = [ + self.repr_traceback_entry(entry, excinfo if last == entry else None) + for entry in traceback + ] + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> Tuple[Traceback, Optional[str]]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. 
+ """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: Optional[str] = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + " {exc_type}: {exc_msg}\n" + " Displaying first and last {max_frames} stack frames out of {total}." + ).format( + exc_type=type(e).__name__, + exc_msg=str(e), + max_frames=max_frames, + total=len(traceback), + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo( + self, excinfo: ExceptionInfo[BaseException] + ) -> "ExceptionChainRepr": + repr_chain: List[ + Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]] + ] = [] + e: Optional[BaseException] = excinfo.value + excinfo_: Optional[ExceptionInfo[BaseException]] = excinfo + descr = None + seen: Set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + + if excinfo_: + # Fall back to native traceback as a temporary workaround until + # full support for exception groups added to ExceptionInfo. + # See https://github.com/pytest-dev/pytest/issues/9159 + if isinstance(e, BaseExceptionGroup): + reprtraceback: Union[ + ReprTracebackNative, ReprTraceback + ] = ReprTracebackNative( + traceback.format_exception( + type(excinfo_.value), + excinfo_.value, + excinfo_.traceback[0]._rawentry, + ) + ) + else: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash = excinfo_._getreprcrash() + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + repr_chain += [(reprtraceback, reprcrash, descr)] + + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@dataclasses.dataclass(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@dataclasses.dataclass(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. 
+ reprtraceback: "ReprTraceback" + reprcrash: Optional["ReprFileLocation"] + sections: List[Tuple[str, str, str]] = dataclasses.field( + init=False, default_factory=list + ) + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@dataclasses.dataclass(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain: Sequence[Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]]] + + def __init__( + self, + chain: Sequence[ + Tuple["ReprTraceback", Optional["ReprFileLocation"], Optional[str]] + ], + ) -> None: + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. + super().__init__( + reprtraceback=chain[-1][0], + reprcrash=chain[-1][1], + ) + self.chain = chain + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback: "ReprTraceback" + reprcrash: Optional["ReprFileLocation"] + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprTraceback(TerminalRepr): + reprentries: Sequence[Union["ReprEntry", "ReprEntryNative"]] + extraline: Optional[str] + style: "_TracebackStyle" + + entrysep: ClassVar = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + self.style = "native" + + +@dataclasses.dataclass(eq=False) +class ReprEntryNative(TerminalRepr): + lines: Sequence[str] + + style: ClassVar["_TracebackStyle"] = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@dataclasses.dataclass(eq=False) +class ReprEntry(TerminalRepr): + lines: Sequence[str] + reprfuncargs: Optional["ReprFuncArgs"] + reprlocals: Optional["ReprLocals"] + reprfileloc: Optional["ReprFileLocation"] + style: "_TracebackStyle" + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" + character, as doing so might break line continuations. 
+ """ + + if not self.lines: + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: List[str] = [] + source_lines: List[str] = [] + failure_lines: List[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + if self.style == "value": + source_lines.append(line) + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + if self.reprfileloc: + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@dataclasses.dataclass(eq=False) +class ReprFileLocation(TerminalRepr): + path: str + lineno: int + message: str + + def __post_init__(self) -> None: + self.path = str(self.path) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@dataclasses.dataclass(eq=False) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@dataclasses.dataclass(eq=False) +class ReprFuncArgs(TerminalRepr): + args: Sequence[Tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> Tuple[Union[str, Path], int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as # type: ignore[attr-defined] + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = fn and absolutepath(fn) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True diff --git a/venv/lib/python3.12/site-packages/_pytest/_code/source.py b/venv/lib/python3.12/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000000..208cfb8003 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_code/source.py @@ -0,0 +1,217 @@ +import ast +import inspect +import textwrap +import tokenize +import types +import warnings +from bisect import bisect_right +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import overload +from typing import Tuple +from typing import Union + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: List[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + elif isinstance(obj, (tuple, list)): + self.lines = deindent(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: + ... + + @overload + def __getitem__(self, key: slice) -> "Source": + ... 
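`getfslineno` above resolves through `place_as` and falls back to `inspect`; in the common case it reduces to:

```python
# Hedged sketch: source location lookup for a plain function (lineno is 0-based).
from _pytest._code import getfslineno

def sample():
    pass

path, lineno = getfslineno(sample)
print(path, lineno)  # e.g. Path("/abs/path/to/module.py") and a 0-based line
```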
+ + def __getitem__(self, key: Union[int, slice]) -> Union[str, "Source"]: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> "Source": + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> "Source": + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> "Source": + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> Tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> "Source": + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> Tuple[Optional[Source], int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> List[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: List[int] = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + # Before Python 3.8, the lineno of a decorated class or function pointed at the decorator. + # Since Python 3.8, the lineno points to the class/def, so need to include the decorators. + if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + for d in x.decorator_list: + values.append(d.lineno - 1) + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: Optional[List[ast.stmt]] = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. 
+ values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: Optional[ast.AST] = None, +) -> Tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = source.lines[start][0].isspace() + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/venv/lib/python3.12/site-packages/_pytest/_io/__init__.py b/venv/lib/python3.12/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000000..db001e918c --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,8 @@ +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/venv/lib/python3.12/site-packages/_pytest/_io/saferepr.py b/venv/lib/python3.12/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000000..c701872238 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,180 @@ +import pprint +import reprlib +from typing import Any +from typing import Dict +from typing import IO +from typing import Optional + + +def _try_repr_or_str(obj: object) -> str: + try: + return repr(obj) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + return f'{type(obj).__name__}("{obj}")' + + +def _format_repr_exception(exc: BaseException, obj: object) -> str: + try: + exc_info = _try_repr_or_str(exc) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + exc_info = f"unpresentable exception ({_try_repr_or_str(exc)})" + return "<[{} raised in repr()] {} object at 0x{:x}>".format( + exc_info, type(obj).__name__, id(obj) + ) + + +def _ellipsize(s: str, maxsize: int) -> str: + if len(s) > maxsize: + i = max(0, (maxsize - 3) // 2) + j = max(0, maxsize - 3 - i) + return s[:i] + "..." + s[len(s) - j :] + return s + + +class SafeRepr(reprlib.Repr): + """ + repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call. 
+ """ + + def __init__(self, maxsize: Optional[int], use_ascii: bool = False) -> None: + """ + :param maxsize: + If not None, will truncate the resulting repr to that specific size, using ellipsis + somewhere in the middle to hide the extra text. + If None, will not impose any size limits on the returning repr. + """ + super().__init__() + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. + self.maxstring = maxsize if maxsize is not None else 1_000_000_000 + self.maxsize = maxsize + self.use_ascii = use_ascii + + def repr(self, x: object) -> str: + try: + if self.use_ascii: + s = ascii(x) + else: + s = super().repr(x) + + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info. + """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr( + obj: object, maxsize: Optional[int] = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False +) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + stdlib. + """ + + return SafeRepr(maxsize, use_ascii).repr(obj) + + +def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str: + """Return an unlimited-size safe repr-string for the given object. + + As with saferepr, failing __repr__ functions of user instances + will be represented with a short exception info. + + This function is a wrapper around simple repr. + + Note: a cleaner solution would be to alter ``saferepr``this way + when maxsize=None, but that might affect some other code. + """ + try: + if use_ascii: + return ascii(obj) + return repr(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +class AlwaysDispatchingPrettyPrinter(pprint.PrettyPrinter): + """PrettyPrinter that always dispatches (regardless of width).""" + + def _format( + self, + object: object, + stream: IO[str], + indent: int, + allowance: int, + context: Dict[int, Any], + level: int, + ) -> None: + # Type ignored because _dispatch is private. + p = self._dispatch.get(type(object).__repr__, None) # type: ignore[attr-defined] + + objid = id(object) + if objid in context or p is None: + # Type ignored because _format is private. 
+ super()._format( # type: ignore[misc] + object, + stream, + indent, + allowance, + context, + level, + ) + return + + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + + +def _pformat_dispatch( + object: object, + indent: int = 1, + width: int = 80, + depth: Optional[int] = None, + *, + compact: bool = False, +) -> str: + return AlwaysDispatchingPrettyPrinter( + indent=indent, width=width, depth=depth, compact=compact + ).pformat(object) diff --git a/venv/lib/python3.12/site-packages/_pytest/_io/terminalwriter.py b/venv/lib/python3.12/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 0000000000..379035d858 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,233 @@ +"""Helper functions for writing to terminals and files.""" +import os +import shutil +import sys +from typing import Optional +from typing import Sequence +from typing import TextIO + +from .wcwidth import wcswidth +from _pytest.compat import final + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. + if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if "NO_COLOR" in os.environ: + return False + if "FORCE_COLOR" in os.environ: + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: Optional[TextIO] = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: Optional[int] = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. 
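The `sep()` arithmetic below sizes the fill so that `len(line) <= fullwidth`; with a pinned width the effect is easy to see (markup stays off for a plain `StringIO`):

```python
# Illustrative: TerminalWriter.sep with a pinned width, no real terminal needed.
from io import StringIO
from _pytest._io import TerminalWriter

buf = StringIO()
tw = TerminalWriter(file=buf)
tw.fullwidth = 40                        # avoid querying the actual terminal
tw.sep("=", "session starts")            # "===== session starts ====="-style
tw.line("collected 3 items", bold=True)  # bold is a no-op without markup
print(buf.getvalue())
```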
+ if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + "indents size ({}) should have same size as lines ({})".format( + len(indents), len(lines) + ) + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _highlight(self, source: str) -> str: + """Highlight the given source code if we have markup support.""" + from _pytest.config.exceptions import UsageError + + if not self.hasmarkup or not self.code_highlight: + return source + try: + from pygments.formatters.terminal import TerminalFormatter + from pygments.lexers.python import PythonLexer + from pygments import highlight + import pygments.util + except ImportError: + return source + else: + try: + highlighted: str = highlight( + source, + PythonLexer(), + TerminalFormatter( + bg=os.getenv("PYTEST_THEME_MODE", "dark"), + style=os.getenv("PYTEST_THEME"), + ), + ) + return highlighted + except pygments.util.ClassNotFound: + raise UsageError( + "PYTEST_THEME environment variable had an invalid value: '{}'. 
" + "Only valid pygment styles are allowed.".format( + os.getenv("PYTEST_THEME") + ) + ) + except pygments.util.OptionError: + raise UsageError( + "PYTEST_THEME_MODE environment variable had an invalid value: '{}'. " + "The only allowed values are 'dark' and 'light'.".format( + os.getenv("PYTEST_THEME_MODE") + ) + ) diff --git a/venv/lib/python3.12/site-packages/_pytest/_io/wcwidth.py b/venv/lib/python3.12/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000000..e5c7bf4d86 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,55 @@ +import unicodedata +from functools import lru_cache + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. + """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/venv/lib/python3.12/site-packages/_pytest/_py/__init__.py b/venv/lib/python3.12/site-packages/_pytest/_py/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/venv/lib/python3.12/site-packages/_pytest/_py/error.py b/venv/lib/python3.12/site-packages/_pytest/_py/error.py new file mode 100644 index 0000000000..0b8f2d535e --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_py/error.py @@ -0,0 +1,109 @@ +"""create errno-specific classes for IO or os calls.""" +from __future__ import annotations + +import errno +import os +import sys +from typing import Callable +from typing import TYPE_CHECKING +from typing import TypeVar + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + P = ParamSpec("P") + +R = TypeVar("R") + + +class Error(EnvironmentError): + def __repr__(self) -> str: + return "{}.{} {!r}: {} ".format( + self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + # repr(self.args) + ) + + def __str__(self) -> str: + s = "[{}]: {}".format( + self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailiable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + + +class ErrorMaker: + """lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. 
+ """ + + _errno2class: dict[int, type[Error]] = {} + + def __getattr__(self, name: str) -> type[Error]: + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno: int) -> type[Error]: + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" % (eno,)) + errorcls = type( + clsname, + (Error,), + {"__module__": "py.error", "__doc__": os.strerror(eno)}, + ) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + """Call a function and raise an errno-exception if applicable.""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except Error: + raise + except OSError as value: + if not hasattr(value, "errno"): + raise + errno = value.errno + if sys.platform == "win32": + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + else: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + + raise cls(f"{func.__name__}{args!r}") + + +_error_maker = ErrorMaker() +checked_call = _error_maker.checked_call + + +def __getattr__(attr: str) -> type[Error]: + return getattr(_error_maker, attr) # type: ignore[no-any-return] diff --git a/venv/lib/python3.12/site-packages/_pytest/_py/path.py b/venv/lib/python3.12/site-packages/_pytest/_py/path.py new file mode 100644 index 0000000000..73a070d19a --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_py/path.py @@ -0,0 +1,1475 @@ +"""local path implementation.""" +from __future__ import annotations + +import atexit +import fnmatch +import importlib.util +import io +import os +import posixpath +import sys +import uuid +import warnings +from contextlib import contextmanager +from os.path import abspath +from os.path import dirname +from os.path import exists +from os.path import isabs +from os.path import isdir +from os.path import isfile +from os.path import islink +from os.path import normpath +from stat import S_ISDIR +from stat import S_ISLNK +from stat import S_ISREG +from typing import Any +from typing import Callable +from typing import cast +from typing import overload +from typing import TYPE_CHECKING + +from . import error + +if TYPE_CHECKING: + from typing import Literal + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") + + +class Checkers: + _depend_on_existence = "exists", "link", "dir", "file" + + def __init__(self, path): + self.path = path + + def dotfile(self): + return self.path.basename.startswith(".") + + def ext(self, arg): + if not arg.startswith("."): + arg = "." 
+ arg + return self.path.ext == arg + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + from .._code.source import getrawcode + + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == "not": + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError(f"no {name!r} checker available for {self.path!r}") + try: + if getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (error.ENOENT, error.ENOTDIR, error.EBUSY): + # EBUSY feels not entirely correct, + # but its kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = "not" + name + if name in kw: + if not kw.get(name): + return False + return True + + _statcache: Stat + + def _stat(self) -> Stat: + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + +class NeverRaised(Exception): + pass + + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) + elif not hasattr(rec, "__call__") and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort( + [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] + ) + if not self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + for p in self.gen(subdir): + yield p + + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if ( + pattern.find(path.sep) == -1 + and iswin32 + and pattern.find(posixpath.sep) != -1 + ): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = "*" + path.sep + pattern + return fnmatch.fnmatch(name, pattern) + + +def map_as_list(func, iter): + return list(map(func, iter)) + + +class Stat: + if TYPE_CHECKING: + + @property + def size(self) -> int: + ... 
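`checked_call` above converts raw `OSError`s into the errno-specific classes that `Checkers._evaluate` catches (`error.ENOENT`, `error.ENOTDIR`, ...); roughly:

```python
# Hedged sketch: errno-specific exceptions from the vendored error module.
import os

from _pytest._py import error

try:
    error.checked_call(os.stat, "/no/such/path")
except error.ENOENT as exc:
    print("missing path:", exc)  # subclass of Error (an EnvironmentError)
```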
+ + @property + def mtime(self) -> float: + ... + + def __getattr__(self, name: str) -> Any: + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + + entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined] + return entry[0] + + @property + def group(self): + """Return group name of file.""" + if iswin32: + raise NotImplementedError("XXX win32") + import grp + + entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined] + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + + +def getuserid(user): + import pwd + + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] # type:ignore[attr-defined] + return user + + +def getgroupid(group): + import grp + + if not isinstance(group, int): + group = grp.getgrnam(group)[2] # type:ignore[attr-defined] + return group + + +class LocalPath: + """Object oriented interface to os.path and other local filesystem + related information. + """ + + class ImportMismatchError(ImportError): + """raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + + def __init__(self, path=None, expanduser=False): + """Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = error.checked_call(os.getcwd) + else: + try: + path = os.fspath(path) + except TypeError: + raise ValueError( + "can only pass None, Path instances " + "or non-empty strings to LocalPath" + ) + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + if sys.platform != "win32": + + def chown(self, user, group, rec=0): + """Change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. 
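+
+        A hedged usage sketch (path and names are illustrative, not part
+        of the original docstring)::
+
+            LocalPath("/srv/app").chown("www-data", "www-data", rec=1)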
+ """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + error.checked_call(os.chown, str(x), uid, gid) + error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self) -> str: + """Return value of a symbolic link.""" + # https://github.com/python/mypy/issues/12278 + return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value] + + def mklinkto(self, oldname): + """Posix style hard link to another name.""" + error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """Create a symbolic link with the given value (pointing to another name).""" + if absolute: + error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(("..",) * n + (relsource,)) + error.checked_call(os.symlink, target, self.strpath) + + def __div__(self, other): + return self.join(os.fspath(other)) + + __truediv__ = __div__ # py3k + + @property + def basename(self): + """Basename part of path.""" + return self._getbyspec("basename")[0] + + @property + def dirname(self): + """Dirname part of path.""" + return self._getbyspec("dirname")[0] + + @property + def purebasename(self): + """Pure base name of the path.""" + return self._getbyspec("purebasename")[0] + + @property + def ext(self): + """Extension of the path (including the '.').""" + return self._getbyspec("ext")[0] + + def read_binary(self): + """Read and return a bytestring from reading the path.""" + with self.open("rb") as f: + return f.read() + + def read_text(self, encoding): + """Read and return a Unicode string from reading the path.""" + with self.open("r", encoding=encoding) as f: + return f.read() + + def read(self, mode="r"): + """Read and return a bytestring from reading the path.""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """Read and return a list of lines from the path. if cr is False, the + newline will be removed from the end of each line.""" + mode = "r" + + if not cr: + content = self.read(mode) + return content.split("\n") + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """(deprecated) return object unpickled from self.read()""" + f = self.open("rb") + try: + import pickle + + return error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """Move this path to target.""" + if target.relto(self): + raise error.EINVAL(target, "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def fnmatch(self, pattern): + """Return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """Return a string which is the relative part of the path + to the given 'relpath'. 
+ """ + if not isinstance(relpath, (str, LocalPath)): + raise TypeError(f"{relpath!r}: not a string or path object") + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + # assert strrelpath[-1] == self.sep + # assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, "_name", None) == "nt": + if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)): + return strself[len(strrelpath) :] + elif strself.startswith(strrelpath): + return strself[len(strrelpath) :] + return "" + + def ensure_dir(self, *args): + """Ensure the path joined with args is a directory.""" + return self.ensure(*args, **{"dir": True}) + + def bestrelpath(self, dest): + """Return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + lst = [os.pardir] * n + if reldest: + lst.append(reldest) + target = dest.sep.join(lst) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """Return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + lst = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + lst.append(current) + if not reverse: + lst.reverse() + return lst + + def common(self, other): + """Return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """Return new path object with 'other' added to the basename""" + return self.new(basename=self.basename + str(other)) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """Yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. 
+ """ + yield from Visitor(fil, rec, ignore, bf, sort).gen(self) + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, "__call__"): + warnings.warn( + DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), + stacklevel=3, + ) + res.sort(sort) + else: + res.sort() + + def __fspath__(self): + return self.strpath + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = os.fspath(self) + try: + s2 = os.fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return os.fspath(self) < os.fspath(other) + + def __gt__(self, other): + return os.fspath(self) > os.fspath(other) + + def samefile(self, other): + """Return True if 'other' references the same file as 'self'.""" + other = os.fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return error.checked_call(os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """Remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + + error.checked_call( + shutil.rmtree, self.strpath, ignore_errors=ignore_errors + ) + else: + error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """Return hexdigest of hashvalue for this file.""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError(f"Don't know how to compute {hashtype!r} hash") + f = self.open("rb") + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """Create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename, ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext" + ) + if "basename" in kw: + if "purebasename" in kw or "ext" in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault("purebasename", purebasename) + try: + ext = kw["ext"] + except KeyError: + pass + else: + if ext and not ext.startswith("."): + ext = "." 
+ ext + kw["basename"] = pb + ext + + if "dirname" in kw and not kw["dirname"]: + kw["dirname"] = drive + else: + kw.setdefault("dirname", dirname) + kw.setdefault("sep", self.sep) + obj.strpath = normpath("%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec: str) -> list[str]: + """See new for what 'spec' can be.""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(",")) + for name in args: + if name == "drive": + res.append(parts[0]) + elif name == "dirname": + res.append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == "basename": + res.append(basename) + else: + i = basename.rfind(".") + if i == -1: + purebasename, ext = basename, "" + else: + purebasename, ext = basename[:i], basename[i:] + if name == "purebasename": + res.append(purebasename) + elif name == "ext": + res.append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def dirpath(self, *args, **kwargs): + """Return the directory path joined with any given path arguments.""" + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return self.new(basename="").join(*args, **kwargs) + + def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath: + """Return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [os.fspath(arg) for arg in args] + strpath = self.strpath + if abs: + newargs: list[str] = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip("/") + arg = arg.replace("/", sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode="r", ensure=False, encoding=None): + """Return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return error.checked_call(io.open, self.strpath, mode, encoding=encoding) + return error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + """Check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. 
+
+        valid checkers::
+
+            file=1    # is a file
+            file=0    # is not a file (may not even exist)
+            dir=1     # is a dir
+            link=1    # is a link
+            exists=1  # exists
+
+        You can specify multiple checker definitions, for example::
+
+            path.check(file=1, link=1)  # a link pointing to a file
+        """
+        if not kw:
+            return exists(self.strpath)
+        if len(kw) == 1:
+            if "dir" in kw:
+                return not kw["dir"] ^ isdir(self.strpath)
+            if "file" in kw:
+                return not kw["file"] ^ isfile(self.strpath)
+        if not kw:
+            kw = {"exists": 1}
+        return Checkers(self)._evaluate(kw)
+
+    _patternchars = set("*?[" + os.sep)
+
+    def listdir(self, fil=None, sort=None):
+        """List directory contents, possibly filtered by the given fil func
+        and possibly sorted.
+        """
+        if fil is None and sort is None:
+            names = error.checked_call(os.listdir, self.strpath)
+            return map_as_list(self._fastjoin, names)
+        if isinstance(fil, str):
+            if not self._patternchars.intersection(fil):
+                child = self._fastjoin(fil)
+                if exists(child.strpath):
+                    return [child]
+                return []
+            fil = FNMatcher(fil)
+        names = error.checked_call(os.listdir, self.strpath)
+        res = []
+        for name in names:
+            child = self._fastjoin(name)
+            if fil is None or fil(child):
+                res.append(child)
+        self._sortlist(res, sort)
+        return res
+
+    def size(self) -> int:
+        """Return size of the underlying file object"""
+        return self.stat().size
+
+    def mtime(self) -> float:
+        """Return last modification time of the path."""
+        return self.stat().mtime
+
+    def copy(self, target, mode=False, stat=False):
+        """Copy path to target.
+
+        If mode is True, copy permission from path to target.
+        If stat is True, copy permission, last modification
+        time, last access time, and flags from path to target.
+        """
+        if self.check(file=1):
+            if target.check(dir=1):
+                target = target.join(self.basename)
+            assert self != target
+            copychunked(self, target)
+            if mode:
+                copymode(self.strpath, target.strpath)
+            if stat:
+                copystat(self, target)
+        else:
+
+            def rec(p):
+                return p.check(link=0)
+
+            for x in self.visit(rec=rec):
+                relpath = x.relto(self)
+                newx = target.join(relpath)
+                newx.dirpath().ensure(dir=1)
+                if x.check(link=1):
+                    newx.mksymlinkto(x.readlink())
+                    continue
+                elif x.check(file=1):
+                    copychunked(x, newx)
+                elif x.check(dir=1):
+                    newx.ensure(dir=1)
+                if mode:
+                    copymode(x.strpath, newx.strpath)
+                if stat:
+                    copystat(x, newx)
+
+    def rename(self, target):
+        """Rename this path to target."""
+        target = os.fspath(target)
+        return error.checked_call(os.rename, self.strpath, target)
+
+    def dump(self, obj, bin=1):
+        """Pickle object into path location"""
+        f = self.open("wb")
+        import pickle
+
+        try:
+            error.checked_call(pickle.dump, obj, f, bin)
+        finally:
+            f.close()
+
+    def mkdir(self, *args):
+        """Create & return the directory joined with args."""
+        p = self.join(*args)
+        error.checked_call(os.mkdir, os.fspath(p))
+        return p
+
+    def write_binary(self, data, ensure=False):
+        """Write binary data into path. If ensure is True create
+        missing parent directories.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        with self.open("wb") as f:
+            f.write(data)
+
+    def write_text(self, data, encoding, ensure=False):
+        """Write text data into path using the specified encoding.
+        If ensure is True create missing parent directories.
+        """
+        if ensure:
+            self.dirpath().ensure(dir=1)
+        with self.open("w", encoding=encoding) as f:
+            f.write(data)
+
+    def write(self, data, mode="w", ensure=False):
+        """Write data into path. If ensure is True create
+        missing parent directories.
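+
+        For example (a sketch; tmpdir is assumed to be an existing
+        LocalPath)::
+
+            f = tmpdir.join("logs", "run.txt")
+            f.write("first line", ensure=True)  # creates logs/ on demand
+            assert f.read() == "first line"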
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if "b" in mode: + if not isinstance(data, bytes): + raise ValueError("can only process bytes") + else: + if not isinstance(data, str): + if not isinstance(data, bytes): + data = str(data) + else: + data = data.decode(sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """Ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get("dir", 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open("wb").close() + return p + + @overload + def stat(self, raising: Literal[True] = ...) -> Stat: + ... + + @overload + def stat(self, raising: Literal[False]) -> Stat | None: + ... + + def stat(self, raising: bool = True) -> Stat | None: + """Return an os.stat() tuple.""" + if raising: + return Stat(self, error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self) -> Stat: + """Return an os.lstat() tuple.""" + return Stat(self, error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """Set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return error.checked_call(os.utime, self.strpath, mtime) + try: + return error.checked_call(os.utime, self.strpath, (-1, mtime)) + except error.EINVAL: + return error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """Change directory to self and return old current directory""" + try: + old = self.__class__() + except error.ENOENT: + old = None + error.checked_call(os.chdir, self.strpath) + return old + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. + """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """Return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """Return last access time of the path.""" + return self.stat().atime + + def __repr__(self): + return "local(%r)" % self.strpath + + def __str__(self): + """Return string representation of the Path.""" + return self.strpath + + def chmod(self, mode, rec=0): + """Change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. 
+ """ + if not isinstance(mode, int): + raise TypeError(f"mode {mode!r} must be an integer") + if rec: + for x in self.visit(rec=rec): + error.checked_call(os.chmod, str(x), mode) + error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join("__init__.py").exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """Return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. + """ + if not self.check(): + raise error.ENOENT(self) + + if ensuresyspath == "importlib": + if modname is None: + modname = self.purebasename + spec = importlib.util.spec_from_file_location(modname, str(self)) + if spec is None or spec.loader is None: + raise ImportError( + f"Can't find module {modname} at location {str(self)}" + ) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... 
too icky to check + modfile = mod.__file__ + assert modfile is not None + if modfile[-4:] in (".pyc", ".pyo"): + modfile = modfile[:-1] + elif modfile.endswith("$py.class"): + modfile = modfile[:-9] + ".py" + if modfile.endswith(os.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except error.ENOENT: + issame = False + if not issame: + ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH") + if ignore != "1": + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + import types + + mod = types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + with open(str(self), "rb") as f: + exec(f.read(), mod.__dict__) + except BaseException: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str: + """Return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import Popen, PIPE + + popen_opts.pop("stdout", None) + popen_opts.pop("stderr", None) + proc = Popen( + [str(self)] + [str(arg) for arg in argv], + **popen_opts, + stdout=PIPE, + stderr=PIPE, + ) + stdout: str | bytes + stdout, stderr = proc.communicate() + ret = proc.wait() + if isinstance(stdout, bytes): + stdout = stdout.decode(sys.getdefaultencoding()) + if ret != 0: + if isinstance(stderr, bytes): + stderr = stderr.decode(sys.getdefaultencoding()) + raise RuntimeError( + ret, + ret, + str(self), + stdout, + stderr, + ) + return stdout + + @classmethod + def sysfind(cls, name, checker=None, paths=None): + """Return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = os.environ["Path"].split(";") + if "" not in paths and "." not in paths: + paths.append(".") + try: + systemroot = os.environ["SYSTEMROOT"] + except KeyError: + pass + else: + paths = [ + path.replace("%SystemRoot%", systemroot) for path in paths + ] + else: + paths = os.environ["PATH"].split(":") + tryadd = [] + if iswin32: + tryadd += os.environ["PATHEXT"].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except error.EACCES: + pass + return None + + @classmethod + def _gethomedir(cls): + try: + x = os.environ["HOME"] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"] + except KeyError: + return None + return cls(x) + + # """ + # special class constructors for local filesystem paths + # """ + @classmethod + def get_temproot(cls): + """Return the system's temporary directory + (where tempfiles are usually created in) + """ + import tempfile + + return local(tempfile.gettempdir()) + + @classmethod + def mkdtemp(cls, rootdir=None): + """Return a Path object pointing to a fresh new temporary directory + (which we created ourself). 
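+
+        Typical create/use/remove cycle (illustrative sketch)::
+
+            tmp = LocalPath.mkdtemp()
+            try:
+                tmp.ensure("data", "sample.txt").write_text("hi", encoding="ascii")
+            finally:
+                tmp.remove(rec=1)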
+ """ + import tempfile + + if rootdir is None: + rootdir = cls.get_temproot() + return cls(error.checked_call(tempfile.mkdtemp, dir=str(rootdir))) + + @classmethod + def make_numbered_dir( + cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800 + ): # two days + """Return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + + def parse_num(path): + """Parse the number out of a path (if it matches the prefix)""" + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix) :]) + except ValueError: + pass + + def create_lockfile(path): + """Exclusively create lockfile. Throws when failed""" + mypid = os.getpid() + lockfile = path.join(".lock") + if hasattr(lockfile, "mksymlinkto"): + lockfile.mksymlinkto(str(mypid)) + else: + fd = error.checked_call( + os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644 + ) + with os.fdopen(fd, "w") as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """Ensure lockfile is removed at process exit""" + mypid = os.getpid() + + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except error.Error: + pass + + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum + 1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (error.EEXIST, error.ENOENT, error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """Read file modification time""" + try: + return path.lstat().mtime + except error.Error: + pass + + garbage_prefix = prefix + "garbage-" + + def is_garbage(path): + """Check if path denotes directory scheduled for removal""" + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (error.EEXIST, error.ENOENT, error.EBUSY): + path_time = get_mtime(path) + if not path_time: + # assume 
directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ["USER"] # linux, et al + except KeyError: + try: + username = os.environ["USERNAME"] # windows + except KeyError: + username = "current" + + src = str(udir) + dest = src[: src.rfind("-")] + "-" + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + + +def copymode(src, dest): + """Copy permission from src to dst.""" + import shutil + + shutil.copymode(src, dest) + + +def copystat(src, dest): + """Copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open("rb") + try: + fdest = dest.open("wb") + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == "_"): + name = name.replace("_", "") + return not name or name.isalnum() + + +local = LocalPath diff --git a/venv/lib/python3.12/site-packages/_pytest/_version.py b/venv/lib/python3.12/site-packages/_pytest/_version.py new file mode 100644 index 0000000000..458d065928 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '7.4.4' +__version_tuple__ = version_tuple = (7, 4, 4) diff --git a/venv/lib/python3.12/site-packages/_pytest/assertion/__init__.py b/venv/lib/python3.12/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000000..a46e58136b --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,181 @@ +"""Support for presenting detailed information in failing assertions.""" +import sys +from typing import Any +from typing import Generator +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook. " + "Make sure to delete any previously generated pyc cache files.", + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :param names: The module names to register. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + # TODO(typing): Add a protocol for mark_rewrite() and use it + # for importhook and for PytestPluginManager.rewrite_hook. 
+ importhook = DummyRewriteHook() # type: ignore + importhook.mark_rewrite(*names) + + +class DummyRewriteHook: + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names: str) -> None: + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config: Config, mode) -> None: + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook: Optional[rewrite.AssertionRewritingHook] = None + + +def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: + """Try to install the rewrite hook, raise SystemError if it fails.""" + config.stash[assertstate_key] = AssertionState(config, "rewrite") + config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config.stash[assertstate_key].trace("installed rewrite import hook") + + def undo() -> None: + hook = config.stash[assertstate_key].hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session: "Session") -> None: + # This hook is only called when test modules are collected + # so for example not in the managing process of pytest-xdist + # (which does not collect test modules). + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +@hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. + + The rewrite module will use util._reprcompare if it exists to use custom + reporting via the pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + + ihook = item.ihook + + def callbinrepr(op, left: object, right: object) -> Optional[str]: + """Call the pytest_assertrepr_compare hook and prepare the result. + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. 
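+
+        As an illustration, a conftest.py hook whose output is prepared
+        here might look like this (Money is a made-up example class)::
+
+            def pytest_assertrepr_compare(config, op, left, right):
+                if op == "==" and isinstance(left, Money) and isinstance(right, Money):
+                    return [
+                        "Money instances differ:",
+                        f"amount: {left.amount} != {right.amount}",
+                    ]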
+ """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + yield + + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: "Session") -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> Optional[List[str]]: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/venv/lib/python3.12/site-packages/_pytest/assertion/rewrite.py b/venv/lib/python3.12/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000000..d1974bb3b4 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1217 @@ +"""Rewrite assertion AST to produce nice error messages.""" +import ast +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +import struct +import sys +import tokenize +import types +from collections import defaultdict +from pathlib import Path +from pathlib import PurePath +from typing import Callable +from typing import Dict +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from _pytest._io.saferepr import saferepr +from _pytest._version import version +from _pytest.assertion import util +from _pytest.assertion.util import ( # noqa: F401 + format_explanation as _format_explanation, +) +from _pytest.config import Config +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + +if sys.version_info >= (3, 8): + namedExpr = ast.NamedExpr + astNameConstant = ast.Constant + astStr = ast.Constant + astNum = ast.Constant +else: + namedExpr = ast.Expr + astNameConstant = ast.NameConstant + astStr = ast.Str + astNum = ast.Num + + +class Sentinel: + pass + + +assertstate_key = StashKey["AssertionState"]() + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT + +# Special marker that denotes we have just left a scope definition +_SCOPE_END_MARKER = Sentinel() + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Optional[Session] = None + self._rewritten_names: Dict[str, Path] = {} + self._must_rewrite: Set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: Dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Optional[Session]) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Optional[Sequence[Union[str, bytes]]] = None, + target: Optional[types.ModuleType] = None, + ) -> Optional[importlib.machinery.ModuleSpec]: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace("find_module called for: %s" % name) + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> Optional[types.ModuleType]: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. 
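+        # In miniature, the invariant amounts to this pattern (illustration
+        # only, not part of the original source):
+        #
+        #     tmp = f"{pyc_path}.{os.getpid()}"  # private to this process
+        #     with open(tmp, "wb") as fp:
+        #         fp.write(payload)
+        #     os.replace(tmp, pyc_path)  # atomic publish on POSIX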
+ write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. + path = PurePath(*parts).with_suffix(".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: "AssertionState") -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. 
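+
+        Plugins normally reach this via the public helper (sketch, with a
+        made-up package name)::
+
+            import pytest
+
+            # in myplugin/__init__.py, before the submodule is imported
+            pytest.register_assert_rewrite("myplugin.helpers")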
+ """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + mod = sys.modules[name] + if not AssertionRewriter.is_rewrite_disabled( + mod.__doc__ or "" + ) and not isinstance(mod.__loader__, type(self)): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name: str) -> None: + from _pytest.warning_types import PytestAssertRewriteWarning + + self.config.issue_config_time_warning( + PytestAssertRewriteWarning( + "Module already imported so cannot be rewritten: %s" % name + ), + stacklevel=5, + ) + + def get_data(self, pathname: Union[str, bytes]) -> bytes: + """Optional PEP302 get_data API.""" + with open(pathname, "rb") as f: + return f.read() + + if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources + else: + from importlib.abc import TraversableResources + + def get_resource_reader(self, name: str) -> TraversableResources: # type: ignore + if sys.version_info < (3, 11): + from importlib.readers import FileReader + else: + from importlib.resources.readers import FileReader + + return FileReader( # type:ignore[no-any-return] + types.SimpleNamespace(path=self._rewritten_names[name]) + ) + + +def _write_pyc_fp( + fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType +) -> None: + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason to deviate. + fp.write(importlib.util.MAGIC_NUMBER) + # https://www.python.org/dev/peps/pep-0552/ + flags = b"\x00\x00\x00\x00" + fp.write(flags) + # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903) + mtime = int(source_stat.st_mtime) & 0xFFFFFFFF + size = source_stat.st_size & 0xFFFFFFFF + # " bool: + proc_pyc = f"{pyc}.{os.getpid()}" + try: + with open(proc_pyc, "wb") as fp: + _write_pyc_fp(fp, source_stat, co) + except OSError as e: + state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}") + return False + + try: + os.replace(proc_pyc, pyc) + except OSError as e: + state.trace(f"error writing pyc file at {pyc}: {e}") + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, pycache dir being a + # file etc. + return False + return True + + +def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]: + """Read and rewrite *fn* and return the code object.""" + stat = os.stat(fn) + source = fn.read_bytes() + strfn = str(fn) + tree = ast.parse(source, filename=strfn) + rewrite_asserts(tree, source, strfn, config) + co = compile(tree, strfn, "exec", dont_inherit=True) + return stat, co + + +def _read_pyc( + source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None +) -> Optional[types.CodeType]: + """Possibly read a pytest pyc containing rewritten code. + + Return rewritten code if successful or None if not. + """ + try: + fp = open(pyc, "rb") + except OSError: + return None + with fp: + try: + stat_result = os.stat(source) + mtime = int(stat_result.st_mtime) + size = stat_result.st_size + data = fp.read(16) + except OSError as e: + trace(f"_read_pyc({source}): OSError {e}") + return None + # Check for invalid or out of date pyc file. 
+ if len(data) != (16): + trace("_read_pyc(%s): invalid pyc (too short)" % source) + return None + if data[:4] != importlib.util.MAGIC_NUMBER: + trace("_read_pyc(%s): invalid pyc (bad magic number)" % source) + return None + if data[4:8] != b"\x00\x00\x00\x00": + trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source) + return None + mtime_data = data[8:12] + if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF: + trace("_read_pyc(%s): out of date" % source) + return None + size_data = data[12:16] + if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF: + trace("_read_pyc(%s): invalid pyc (incorrect size)" % source) + return None + try: + co = marshal.load(fp) + except Exception as e: + trace(f"_read_pyc({source}): marshal.load error {e}") + return None + if not isinstance(co, types.CodeType): + trace("_read_pyc(%s): not a code object" % source) + return None + return co + + +def rewrite_asserts( + mod: ast.Module, + source: bytes, + module_path: Optional[str] = None, + config: Optional[Config] = None, +) -> None: + """Rewrite the assert statements in mod.""" + AssertionRewriter(module_path, config, source).run(mod) + + +def _saferepr(obj: object) -> str: + r"""Get a safe repr of an object for assertion error messages. + + The assertion formatting (util.format_explanation()) requires + newlines to be escaped since they are a special character for it. + Normally assertion.util.format_explanation() does this but for a + custom repr it is possible to contain one of the special escape + sequences, especially '\n{' and '\n}' are likely to be present in + JSON reprs. + """ + maxsize = _get_maxsize_for_saferepr(util._config) + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Optional[Config]) -> Optional[int]: + """Get `maxsize` configuration for saferepr based on the given config object.""" + verbosity = config.getoption("verbose") if config is not None else 0 + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. 
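+    # For instance (illustrative): an assertion message "first\nsecond"
+    # comes out as "first\n~second", keeping the line break in the report.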
+ replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + return False + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> Dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + ret: Dict[int, str] = {} + + depth = 0 + lines: List[str] = [] + assert_lineno: Optional[int] = None + seen_lines: Set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in 
seen_lines:
+                lines[-1] = lines[-1][:offset]
+            # multi-line assert with escaped newline before message
+            else:
+                lines.append(line[:offset])
+            _write_and_reset()
+        elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}:
+            _write_and_reset()
+        elif lines and lineno not in seen_lines:
+            lines.append(line)
+            seen_lines.add(lineno)
+
+    return ret
+
+
+def _get_ast_constant_value(value: astStr) -> object:
+    if sys.version_info >= (3, 8):
+        return value.value
+    else:
+        return value.s
+
+
+class AssertionRewriter(ast.NodeVisitor):
+    """Assertion rewriting implementation.
+
+    The main entrypoint is to call .run() with an ast.Module instance;
+    this will then find all the assert statements and rewrite them to
+    provide intermediate values and a detailed assertion error. See
+    http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
+    for an overview of how this works.
+
+    The entry point here is .run(), which will iterate over all the
+    statements in an ast.Module and for each ast.Assert statement it
+    finds call .visit() with it. Then .visit_Assert() takes over and
+    is responsible for creating new ast statements to replace the
+    original assert statement: it rewrites the test of an assertion
+    to provide intermediate values and replaces it with an if statement
+    which raises an assertion error with a detailed explanation when
+    the expression is false, and which calls the pytest_assertion_pass
+    hook when the expression is true.
+
+    For this .visit_Assert() uses the visitor pattern to visit all the
+    AST nodes of the ast.Assert.test field, each visit call returning
+    an AST node and the corresponding explanation string. During this,
+    state is kept in several instance attributes:
+
+    :statements: All the AST statements which will replace the assert
+       statement.
+
+    :variables: This is populated by .variable() with each variable
+       used by the statements so that they can all be set to None at
+       the end of the statements.
+
+    :variable_counter: Counter to create new unique variables needed
+       by statements. Variables are created using .variable() and
+       have the form of "@py_assert0".
+
+    :expl_stmts: The AST statements which will be executed to get
+       data from the assertion. This is the code which will construct
+       the detailed assertion message that is used in the AssertionError
+       or for the pytest_assertion_pass hook.
+
+    :explanation_specifiers: A dict filled by .explanation_param()
+       with %-formatting placeholders and their corresponding
+       expressions to use in the building of an assertion message.
+       This is used by .pop_format_context() to build a message.
+
+    :stack: A stack of the explanation_specifiers dicts maintained by
+       .push_format_context() and .pop_format_context(), which allows
+       building another %-formatted string while one is already being
+       built.
+
+    :scope: A tuple containing the current scope, used for
+       variables_overwrite.
+
+    :variables_overwrite: A dict filled with references to variables
+       that change value within an assert. This happens when a variable
+       is reassigned with the walrus operator.
+
+    This state, except variables_overwrite, is reset on every new assert
+    statement visited and is used by the other visitors.
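+
+    For example (roughly), ``assert x == y`` is rewritten into statements
+    that store both operands in ``@py_assert`` temporaries, evaluate the
+    comparison, and raise AssertionError with a %-formatted explanation
+    showing the operand values when the comparison is false.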
+ """ + + def __init__( + self, module_path: Optional[str], config: Optional[Config], source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + self.scope: tuple[ast.AST, ...] = () + self.variables_overwrite: defaultdict[ + tuple[ast.AST, ...], Dict[str, str] + ] = defaultdict(dict) + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + item = None + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, astStr) + and isinstance(_get_ast_constant_value(item.value), str) + ): + doc = _get_ast_constant_value(item.value) + assert isinstance(doc, str) + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. + self.scope = (mod,) + nodes: List[Union[ast.AST, Sentinel]] = [mod] + while nodes: + node = nodes.pop() + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): + self.scope = tuple((*self.scope, node)) + nodes.append(_SCOPE_END_MARKER) + if node == _SCOPE_END_MARKER: + self.scope = self.scope[:-1] + continue + assert isinstance(node, ast.AST) + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: List[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. 
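+        # e.g. successive calls yield "@py_assert0", "@py_assert1", ...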
+ name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: Dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. + """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys = [astStr(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> Tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> List[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. 
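+
+        When the experimental ``enable_assertion_pass_hook`` ini option is
+        enabled, an additional passing branch is generated which calls the
+        pytest_assertion_pass hook with the original expression and its
+        explanation.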
+ """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + from _pytest.warning_types import PytestAssertRewriteWarning + import warnings + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: List[ast.stmt] = [] + self.variables: List[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: List[str] = [] + + self.stack: List[Dict[str, ast.expr]] = [] + self.expl_stmts: List[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. + top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(astStr(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = astStr("") + gluestr = "assert " + err_explanation = ast.BinOp(astStr(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + astNum(assert_.lineno), + astStr(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + self.expl_stmts + [hook_call_pass], + [], + ) + statements_pass = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, astNameConstant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = astStr("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), astStr(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, astNameConstant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). 
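+        # Every synthesized node inherits the original assert's location so
+        # that tracebacks keep pointing at the user's source line.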
+ for stmt in self.statements: + for node in traverse_node(stmt): + ast.copy_location(node, assert_) + return self.statements + + def visit_NamedExpr(self, name: namedExpr) -> Tuple[namedExpr, str]: + # This method handles the 'walrus operator' repr of the target + # name if it's a local variable or _should_repr_global_name() + # thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + target_id = name.target.id # type: ignore[attr-defined] + inlocs = ast.Compare(astStr(target_id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), astStr(target_id)) + return name, self.explanation_param(expr) + + def visit_Name(self, name: ast.Name) -> Tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(astStr(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), astStr(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> Tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: List[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa + self.expl_stmts = fail_inner + # Check if the left operand is a namedExpr and the value has already been visited + if ( + isinstance(v, ast.Compare) + and isinstance(v.left, namedExpr) + and v.left.target.id + in [ + ast_expr.id + for ast_expr in boolop.values[:i] + if hasattr(ast_expr, "id") + ] + ): + pytest_temp = self.variable() + self.variables_overwrite[self.scope][ + v.left.target.id + ] = v.left # type:ignore[assignment] + v.left.target.id = pytest_temp + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(astStr(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: List[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, astNum(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> Tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> Tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} 
{symbol} {right_expl})" + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call: ast.Call) -> Tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( + self.scope, {} + ): + arg = self.variables_overwrite[self.scope][ + arg.id + ] # type:ignore[assignment] + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + if isinstance( + keyword.value, ast.Name + ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): + keyword.value = self.variables_overwrite[self.scope][ + keyword.value.id + ] # type:ignore[assignment] + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + if keyword.arg: + arg_expls.append(keyword.arg + "=" + expl) + else: # **args have `arg` keywords with an .arg of None + arg_expls.append("**" + expl) + + expl = "{}({})".format(func_expl, ", ".join(arg_expls)) + new_call = ast.Call(new_func, new_args, new_kwargs) + res = self.assign(new_call) + res_expl = self.explanation_param(self.display(res)) + outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}" + return res, outer_expl + + def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]: + # A Starred node can appear in a function call. + res, expl = self.visit(starred.value) + new_starred = ast.Starred(res, starred.ctx) + return new_starred, "*" + expl + + def visit_Attribute(self, attr: ast.Attribute) -> Tuple[ast.Name, str]: + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + value, value_expl = self.visit(attr.value) + res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) + res_expl = self.explanation_param(self.display(res)) + pat = "%s\n{%s = %s.%s\n}" + expl = pat % (res_expl, res_expl, value_expl, attr.attr) + return res, expl + + def visit_Compare(self, comp: ast.Compare) -> Tuple[ast.expr, str]: + self.push_format_context() + # We first check if we have overwritten a variable in the previous assert + if isinstance( + comp.left, ast.Name + ) and comp.left.id in self.variables_overwrite.get(self.scope, {}): + comp.left = self.variables_overwrite[self.scope][ + comp.left.id + ] # type:ignore[assignment] + if isinstance(comp.left, namedExpr): + self.variables_overwrite[self.scope][ + comp.left.target.id + ] = comp.left # type:ignore[assignment] + left_res, left_expl = self.visit(comp.left) + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): + left_expl = f"({left_expl})" + res_variables = [self.variable() for i in range(len(comp.ops))] + load_names = [ast.Name(v, ast.Load()) for v in res_variables] + store_names = [ast.Name(v, ast.Store()) for v in res_variables] + it = zip(range(len(comp.ops)), comp.ops, comp.comparators) + expls = [] + syms = [] + results = [left_res] + for i, op, next_operand in it: + if ( + isinstance(next_operand, namedExpr) + and isinstance(left_res, ast.Name) + and next_operand.target.id == left_res.id + ): + next_operand.target.id = self.variable() + self.variables_overwrite[self.scope][ + left_res.id + ] = next_operand # type:ignore[assignment] + next_res, next_expl = self.visit(next_operand) + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): + next_expl = f"({next_expl})" + results.append(next_res) + sym = BINOP_MAP[op.__class__] + syms.append(astStr(sym)) + expl = f"{left_expl} {sym} {next_expl}" + 
expls.append(astStr(expl))
+                res_expr = ast.Compare(left_res, [op], [next_res])
+                self.statements.append(ast.Assign([store_names[i]], res_expr))
+                left_res, left_expl = next_res, next_expl
+        # Use pytest.assertion.util._reprcompare if that's available.
+        expl_call = self.helper(
+            "_call_reprcompare",
+            ast.Tuple(syms, ast.Load()),
+            ast.Tuple(load_names, ast.Load()),
+            ast.Tuple(expls, ast.Load()),
+            ast.Tuple(results, ast.Load()),
+        )
+        if len(comp.ops) > 1:
+            res: ast.expr = ast.BoolOp(ast.And(), load_names)
+        else:
+            res = load_names[0]
+
+        return res, self.explanation_param(self.pop_format_context(expl_call))
+
+
+def try_makedirs(cache_dir: Path) -> bool:
+    """Attempt to create the given directory, including any missing parents.
+
+    Returns True if successful or if it already exists.
+    """
+    try:
+        os.makedirs(cache_dir, exist_ok=True)
+    except (FileNotFoundError, NotADirectoryError, FileExistsError):
+        # One of the path components was not a directory:
+        # - we're in a zip file
+        # - it is a file
+        return False
+    except PermissionError:
+        return False
+    except OSError as e:
+        # as of now, EROFS doesn't have an equivalent OSError-subclass
+        if e.errno == errno.EROFS:
+            return False
+        raise
+    return True
+
+
+def get_cache_dir(file_path: Path) -> Path:
+    """Return the cache directory to write .pyc files for the given .py file path."""
+    if sys.version_info >= (3, 8) and sys.pycache_prefix:
+        # given:
+        #   prefix = '/tmp/pycs'
+        #   path = '/home/user/proj/test_app.py'
+        # we want:
+        #   '/tmp/pycs/home/user/proj'
+        return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1])
+    else:
+        # classic pycache directory
+        return file_path.parent / "__pycache__"
diff --git a/venv/lib/python3.12/site-packages/_pytest/assertion/truncate.py b/venv/lib/python3.12/site-packages/_pytest/assertion/truncate.py
new file mode 100644
index 0000000000..dfd6f65d28
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/assertion/truncate.py
@@ -0,0 +1,115 @@
+"""Utilities for truncating assertion output.
+
+Current default behaviour is to truncate assertion explanations at
+~8 terminal lines, unless running in "-vv" mode or running on CI.
+"""
+from typing import List
+from typing import Optional
+
+from _pytest.assertion import util
+from _pytest.nodes import Item
+
+
+DEFAULT_MAX_LINES = 8
+DEFAULT_MAX_CHARS = 8 * 80
+USAGE_MSG = "use '-vv' to show"
+
+
+def truncate_if_required(
+    explanation: List[str], item: Item, max_length: Optional[int] = None
+) -> List[str]:
+    """Truncate this assertion explanation if the given test item is eligible."""
+    if _should_truncate_item(item):
+        return _truncate_explanation(explanation)
+    return explanation
+
+
+def _should_truncate_item(item: Item) -> bool:
+    """Whether or not this test item is eligible for truncation."""
+    verbose = item.config.option.verbose
+    return verbose < 2 and not util.running_on_ci()
+
+
+def _truncate_explanation(
+    input_lines: List[str],
+    max_lines: Optional[int] = None,
+    max_chars: Optional[int] = None,
+) -> List[str]:
+    """Truncate the given list of strings that makes up the assertion explanation.
+
+    Truncates to either 8 lines, or 640 characters - whichever the input reaches
+    first, taking the truncation explanation into account. The remaining lines
+    will be replaced by a usage message.
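+
+    When truncation happens, the returned list ends with a usage line of the
+    form "...Full output truncated (3 lines hidden), use '-vv' to show".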
+    """
+    if max_lines is None:
+        max_lines = DEFAULT_MAX_LINES
+    if max_chars is None:
+        max_chars = DEFAULT_MAX_CHARS
+
+    # Check if truncation required
+    input_char_count = len("".join(input_lines))
+    # The length of the truncation explanation depends on the number of lines
+    # removed but is at least 68 characters:
+    # The real value is
+    # 64 (for the base message:
+    # '...\n...Full output truncated (1 line hidden), use '-vv' to show")'
+    # )
+    # + 1 (for plural)
+    # + int(math.log10(len(input_lines) - max_lines)) (number of hidden lines, at least 1)
+    # + 3 for the '...' added to the truncated line
+    # But if there are more than 100 lines it's very likely that we're going
+    # to truncate, so we don't need the exact value using log10.
+    tolerable_max_chars = (
+        max_chars + 70  # 64 + 1 (for plural) + 2 (for '99') + 3 for '...'
+    )
+    # The truncation explanation adds two lines to the output
+    tolerable_max_lines = max_lines + 2
+    if (
+        len(input_lines) <= tolerable_max_lines
+        and input_char_count <= tolerable_max_chars
+    ):
+        return input_lines
+    # Truncate first to max_lines, and then truncate to max_chars if necessary
+    truncated_explanation = input_lines[:max_lines]
+    truncated_char = True
+    # We reevaluate the need to truncate chars following removal of some lines
+    if len("".join(truncated_explanation)) > tolerable_max_chars:
+        truncated_explanation = _truncate_by_char_count(
+            truncated_explanation, max_chars
+        )
+    else:
+        truncated_char = False
+
+    truncated_line_count = len(input_lines) - len(truncated_explanation)
+    if truncated_explanation[-1]:
+        # Add ellipsis and take into account part-truncated final line
+        truncated_explanation[-1] = truncated_explanation[-1] + "..."
+        if truncated_char:
+            # It's possible that we did not remove any char from this line
+            truncated_line_count += 1
+    else:
+        # Add proper ellipsis when we were able to fit a full line exactly
+        truncated_explanation[-1] = "..."
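+    # Append a summary line stating how many input lines were hidden.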
+    return truncated_explanation + [
+        "",
+        f"...Full output truncated ({truncated_line_count} line"
+        f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}",
+    ]
+
+
+def _truncate_by_char_count(input_lines: List[str], max_chars: int) -> List[str]:
+    # Find point at which input length exceeds total allowed length
+    iterated_char_count = 0
+    for iterated_index, input_line in enumerate(input_lines):
+        if iterated_char_count + len(input_line) > max_chars:
+            break
+        iterated_char_count += len(input_line)
+
+    # Create truncated explanation with modified final line
+    truncated_result = input_lines[:iterated_index]
+    final_line = input_lines[iterated_index]
+    if final_line:
+        final_line_truncate_point = max_chars - iterated_char_count
+        final_line = final_line[:final_line_truncate_point]
+    truncated_result.append(final_line)
+    return truncated_result
diff --git a/venv/lib/python3.12/site-packages/_pytest/assertion/util.py b/venv/lib/python3.12/site-packages/_pytest/assertion/util.py
new file mode 100644
index 0000000000..39ca5403e0
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/assertion/util.py
@@ -0,0 +1,522 @@
+"""Utilities for assertion debugging."""
+import collections.abc
+import os
+import pprint
+from typing import AbstractSet
+from typing import Any
+from typing import Callable
+from typing import Iterable
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from unicodedata import normalize
+
+import _pytest._code
+from _pytest import outcomes
+from _pytest._io.saferepr import _pformat_dispatch
+from _pytest._io.saferepr import saferepr
+from _pytest._io.saferepr import saferepr_unlimited
+from _pytest.config import Config
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect that this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare: Optional[Callable[[str, object, object], Optional[str]]] = None
+
+# Works similarly to the _reprcompare attribute. Is populated with the hook
+# call when pytest_runtest_setup is called.
+_assertion_pass: Optional[Callable[[int, str, str], None]] = None
+
+# Config object which is assigned during pytest_runtest_protocol.
+_config: Optional[Config] = None
+
+
+def format_explanation(explanation: str) -> str:
+    r"""Format an explanation.
+
+    Normally all embedded newlines are escaped; however, there are
+    three exceptions: \n{, \n} and \n~. The first two are intended to
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()). The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    lines = _split_explanation(explanation)
+    result = _format_lines(lines)
+    return "\n".join(result)
+
+
+def _split_explanation(explanation: str) -> List[str]:
+    r"""Return a list of individual lines in the explanation.
+
+    This will return a list of lines split on '\n{', '\n}' and '\n~'.
+    Any other newlines will be escaped and appear in the line as the
+    literal '\n' characters.
+    """
+    raw_lines = (explanation or "").split("\n")
+    lines = [raw_lines[0]]
+    for values in raw_lines[1:]:
+        if values and values[0] in ["{", "}", "~", ">"]:
+            lines.append(values)
+        else:
+            lines[-1] += "\\n" + values
+    return lines
+
+
+def _format_lines(lines: Sequence[str]) -> List[str]:
+    """Format the individual lines.
+
+    This will replace the '{', '}' and '~' characters of our mini formatting
+    language with the proper 'where ...', 'and ...' and ' + ...' text, taking
+    care of indentation along the way.
+
+    Return a list of formatted lines.
+    """
+    result = list(lines[:1])
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith("{"):
+            if stackcnt[-1]:
+                s = "and   "
+            else:
+                s = "where "
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(" +" + "  " * (len(stack) - 1) + s + line[1:])
+        elif line.startswith("}"):
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line[0] in ["~", ">"]
+            stack[-1] += 1
+            indent = len(stack) if line.startswith("~") else len(stack) - 1
+            result.append("  " * indent + line[1:])
+    assert len(stack) == 1
+    return result
+
+
+def issequence(x: Any) -> bool:
+    return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
+
+
+def istext(x: Any) -> bool:
+    return isinstance(x, str)
+
+
+def isdict(x: Any) -> bool:
+    return isinstance(x, dict)
+
+
+def isset(x: Any) -> bool:
+    return isinstance(x, (set, frozenset))
+
+
+def isnamedtuple(obj: Any) -> bool:
+    return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
+def isdatacls(obj: Any) -> bool:
+    return getattr(obj, "__dataclass_fields__", None) is not None
+
+
+def isattrs(obj: Any) -> bool:
+    return getattr(obj, "__attrs_attrs__", None) is not None
+
+
+def isiterable(obj: Any) -> bool:
+    try:
+        iter(obj)
+        return not istext(obj)
+    except Exception:
+        return False
+
+
+def has_default_eq(
+    obj: object,
+) -> bool:
+    """Check if an instance of an object contains the default eq.
+
+    First, we check whether the object's __eq__ attribute has __code__;
+    if so, we compare the method's code filename (__code__.co_filename)
+    to the default ones generated by the dataclasses and attrs modules:
+    for dataclasses the default co_filename is "<string>", while for attrs
+    classes the filename contains "attrs generated eq".
+    """
+    # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
+    if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
+        code_filename = obj.__eq__.__code__.co_filename
+
+        if isattrs(obj):
+            return "attrs generated eq" in code_filename
+
+        return code_filename == "<string>"  # data class
+    return True
+
+
+def assertrepr_compare(
+    config, op: str, left: Any, right: Any, use_ascii: bool = False
+) -> Optional[List[str]]:
+    """Return specialised explanations for some operators/operands."""
+    verbose = config.getoption("verbose")
+
+    # Strings which normalize equal are often hard to distinguish when
+    # printed; use ascii() to make this easier. See issue #3246.
+    use_ascii = (
+        isinstance(left, str)
+        and isinstance(right, str)
+        and normalize("NFD", left) == normalize("NFD", right)
+    )
+
+    if verbose > 1:
+        left_repr = saferepr_unlimited(left, use_ascii=use_ascii)
+        right_repr = saferepr_unlimited(right, use_ascii=use_ascii)
+    else:
+        # XXX: "15 chars indentation" is wrong
+        #      ("E       AssertionError: assert "); should use term width.
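+        # e.g. for op "==": maxsize = (80 - 15 - 2 - 2) // 2 = 30 characters
+        # of repr for each side of the comparison.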
+ maxsize = ( + 80 - 15 - len(op) - 2 + ) // 2 # 15 chars indentation, 1 space around op + + left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii) + right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii) + + summary = f"{left_repr} {op} {right_repr}" + + explanation = None + try: + if op == "==": + explanation = _compare_eq_any(left, right, verbose) + elif op == "not in": + if istext(left) and istext(right): + explanation = _notin_text(left, right, verbose) + except outcomes.Exit: + raise + except Exception: + explanation = [ + "(pytest_assertion plugin: representation of details failed: {}.".format( + _pytest._code.ExceptionInfo.from_current()._getreprcrash() + ), + " Probably an object has a faulty __repr__.)", + ] + + if not explanation: + return None + + return [summary] + explanation + + +def _compare_eq_any(left: Any, right: Any, verbose: int = 0) -> List[str]: + explanation = [] + if istext(left) and istext(right): + explanation = _diff_text(left, right, verbose) + else: + from _pytest.python_api import ApproxBase + + if isinstance(left, ApproxBase) or isinstance(right, ApproxBase): + # Although the common order should be obtained == expected, this ensures both ways + approx_side = left if isinstance(left, ApproxBase) else right + other_side = right if isinstance(left, ApproxBase) else left + + explanation = approx_side._repr_compare(other_side) + elif type(left) == type(right) and ( + isdatacls(left) or isattrs(left) or isnamedtuple(left) + ): + # Note: unlike dataclasses/attrs, namedtuples compare only the + # field values, not the type or field names. But this branch + # intentionally only handles the same-type case, which was often + # used in older code bases before dataclasses/attrs were available. + explanation = _compare_eq_cls(left, right, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. 
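+
+    When more than 42 leading (or trailing) characters match, the matching
+    run is skipped and a note is emitted instead, keeping 10 characters of
+    context around the first difference.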
+ """ + from difflib import ndiff + + explanation: List[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + "Skipping %s identical leading characters in diff, use -v to show" % i + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + "Skipping {} identical trailing " + "characters in diff, use -v to show".format(i) + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation += [ + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ] + return explanation + + +def _surrounding_parens_on_own_lines(lines: List[str]) -> None: + """Move opening/closing parenthesis/bracket to own lines.""" + opening = lines[0][:1] + if opening in ["(", "[", "{"]: + lines[0] = " " + lines[0][1:] + lines[:] = [opening] + lines + closing = lines[-1][-1:] + if closing in [")", "]", "}"]: + lines[-1] = lines[-1][:-1] + "," + lines[:] = lines + [closing] + + +def _compare_eq_iterable( + left: Iterable[Any], right: Iterable[Any], verbose: int = 0 +) -> List[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = pprint.pformat(left).splitlines() + right_formatting = pprint.pformat(right).splitlines() + + # Re-format for different output lengths. 
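+    # If pprint rendered the two sides with a different number of lines,
+    # re-render both with _pformat_dispatch so the ndiff below aligns better.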
+ lines_left = len(left_formatting) + lines_right = len(right_formatting) + if lines_left != lines_right: + left_formatting = _pformat_dispatch(left).splitlines() + right_formatting = _pformat_dispatch(right).splitlines() + + if lines_left > 1 or lines_right > 1: + _surrounding_parens_on_own_lines(left_formatting) + _surrounding_parens_on_own_lines(right_formatting) + + explanation = ["Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + line.rstrip() for line in difflib.ndiff(right_formatting, left_formatting) + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], right: Sequence[Any], verbose: int = 0 +) -> List[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: List[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"] + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [f"{dir_with_more} contains one more item: {extra}"] + else: + explanation += [ + "%s contains %d more items, first extra item: %s" + % (dir_with_more, len_diff, extra) + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], right: AbstractSet[Any], verbose: int = 0 +) -> List[str]: + explanation = [] + diff_left = left - right + diff_right = right - left + if diff_left: + explanation.append("Extra items in the left set:") + for item in diff_left: + explanation.append(saferepr(item)) + if diff_right: + explanation.append("Extra items in the right set:") + for item in diff_right: + explanation.append(saferepr(item)) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0 +) -> List[str]: + explanation: List[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += ["Omitting %s identical items, use -vv to show" % len(same)] + elif same: + explanation += ["Common items:"] + explanation += pprint.pformat(same).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + "Left contains %d more item%s:" + % (len_extra_left, "" if len_extra_left == 1 else "s") + ) + explanation.extend( + pprint.pformat({k: left[k] for k in extra_left}).splitlines() + ) + extra_right = 
set_right - set_left + len_extra_right = len(extra_right) + if len_extra_right: + explanation.append( + "Right contains %d more item%s:" + % (len_extra_right, "" if len_extra_right == 1 else "s") + ) + explanation.extend( + pprint.pformat({k: right[k] for k in extra_right}).splitlines() + ) + return explanation + + +def _compare_eq_cls(left: Any, right: Any, verbose: int) -> List[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append("Omitting %s identical items, use -vv to show" % len(same)) + elif same: + explanation += ["Matching attributes:"] + explanation += pprint.pformat(same).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += pprint.pformat(diff).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + "Drill down into differing attribute %s:" % field, + ("%s%s: %r != %r") % (indent, field, field_left, field_right), + ] + explanation += [ + indent + line + for line in _compare_eq_any(field_left, field_right, verbose) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, verbose) + newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/venv/lib/python3.12/site-packages/_pytest/cacheprovider.py b/venv/lib/python3.12/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000000..1ecb865058 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,602 @@ +"""Implementation of the cache provider.""" +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. 
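+#
+# On-disk layout (for orientation; see the Cache._CACHE_PREFIX_* constants
+# below): values written via Cache.set() live under <cache_dir>/v/<key>,
+# and directories created via Cache.mkdir() live under <cache_dir>/d/<name>.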
+import dataclasses
+import json
+import os
+from pathlib import Path
+from typing import Dict
+from typing import Generator
+from typing import Iterable
+from typing import List
+from typing import Optional
+from typing import Set
+from typing import Union
+
+from .pathlib import resolve_from_str
+from .pathlib import rm_rf
+from .reports import CollectReport
+from _pytest import nodes
+from _pytest._io import TerminalWriter
+from _pytest.compat import final
+from _pytest.config import Config
+from _pytest.config import ExitCode
+from _pytest.config import hookimpl
+from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.fixtures import fixture
+from _pytest.fixtures import FixtureRequest
+from _pytest.main import Session
+from _pytest.nodes import File
+from _pytest.python import Package
+from _pytest.reports import TestReport
+
+README_CONTENT = """\
+# pytest cache directory #
+
+This directory contains data from pytest's cache plugin,
+which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
+
+**Do not** commit this to version control.
+
+See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
+"""
+
+CACHEDIR_TAG_CONTENT = b"""\
+Signature: 8a477f597d28d172789f06886806bc55
+# This file is a cache directory tag created by pytest.
+# For information about cache directory tags, see:
+# https://bford.info/cachedir/spec.html
+"""
+
+
+@final
+@dataclasses.dataclass
+class Cache:
+    """Instance of the `cache` fixture."""
+
+    _cachedir: Path = dataclasses.field(repr=False)
+    _config: Config = dataclasses.field(repr=False)
+
+    # Sub-directory under cache-dir for directories created by `mkdir()`.
+    _CACHE_PREFIX_DIRS = "d"
+
+    # Sub-directory under cache-dir for values created by `set()`.
+    _CACHE_PREFIX_VALUES = "v"
+
+    def __init__(
+        self, cachedir: Path, config: Config, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._cachedir = cachedir
+        self._config = config
+
+    @classmethod
+    def for_config(cls, config: Config, *, _ispytest: bool = False) -> "Cache":
+        """Create the Cache instance for a Config.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        cachedir = cls.cache_dir_from_config(config, _ispytest=True)
+        if config.getoption("cacheclear") and cachedir.is_dir():
+            cls.clear_cache(cachedir, _ispytest=True)
+        return cls(cachedir, config, _ispytest=True)
+
+    @classmethod
+    def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None:
+        """Clear the sub-directories used to hold cached directories and values.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
+            d = cachedir / prefix
+            if d.is_dir():
+                rm_rf(d)
+
+    @staticmethod
+    def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path:
+        """Get the path to the cache directory for a Config.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        return resolve_from_str(config.getini("cache_dir"), config.rootpath)
+
+    def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None:
+        """Issue a cache warning.
+
+        :meta private:
+        """
+        check_ispytest(_ispytest)
+        import warnings
+        from _pytest.warning_types import PytestCacheWarning
+
+        warnings.warn(
+            PytestCacheWarning(fmt.format(**args) if args else fmt),
+            self._config.hook,
+            stacklevel=3,
+        )
+
+    def mkdir(self, name: str) -> Path:
+        """Return a directory path object with the given name.
+
+        If the directory does not yet exist, it will be created. You can use
+        it to manage files to e.g. store/retrieve database dumps across test
+        sessions.
+
+        .. versionadded:: 7.0
+
+        :param name:
+            Must be a string not containing a ``/`` separator.
+            Make sure the name contains your plugin or application
+            identifiers to prevent clashes with other cache users.
+        """
+        path = Path(name)
+        if len(path.parts) > 1:
+            raise ValueError("name is not allowed to contain path separators")
+        res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path)
+        res.mkdir(exist_ok=True, parents=True)
+        return res

+    def _getvaluepath(self, key: str) -> Path:
+        return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
+
+    def get(self, key: str, default):
+        """Return the cached value for the given key.
+
+        If no value has been cached yet or the value cannot be read, the
+        specified default is returned.
+
+        :param key:
+            Must be a ``/`` separated value. Usually the first
+            name is the name of your plugin or your application.
+        :param default:
+            The value to return in case of a cache-miss or invalid cache value.
+        """
+        path = self._getvaluepath(key)
+        try:
+            with path.open("r", encoding="UTF-8") as f:
+                return json.load(f)
+        except (ValueError, OSError):
+            return default
+
+    def set(self, key: str, value: object) -> None:
+        """Save value for the given key.
+
+        :param key:
+            Must be a ``/`` separated value. Usually the first
+            name is the name of your plugin or your application.
+        :param value:
+            May be any combination of basic Python types,
+            including nested types like lists of dictionaries.
+        """
+        path = self._getvaluepath(key)
+        try:
+            if path.parent.is_dir():
+                cache_dir_exists_already = True
+            else:
+                cache_dir_exists_already = self._cachedir.exists()
+                path.parent.mkdir(exist_ok=True, parents=True)
+        except OSError as exc:
+            self.warn(
+                f"could not create cache path {path}: {exc}",
+                _ispytest=True,
+            )
+            return
+        if not cache_dir_exists_already:
+            self._ensure_supporting_files()
+        data = json.dumps(value, ensure_ascii=False, indent=2)
+        try:
+            f = path.open("w", encoding="UTF-8")
+        except OSError as exc:
+            self.warn(
+                f"cache could not write path {path}: {exc}",
+                _ispytest=True,
+            )
+        else:
+            with f:
+                f.write(data)
+
+    def _ensure_supporting_files(self) -> None:
+        """Create supporting files in the cache dir that are not really part of the cache."""
+        readme_path = self._cachedir / "README.md"
+        readme_path.write_text(README_CONTENT, encoding="UTF-8")
+
+        gitignore_path = self._cachedir.joinpath(".gitignore")
+        msg = "# Created by pytest automatically.\n*\n"
+        gitignore_path.write_text(msg, encoding="UTF-8")
+
+        cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
+        cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
+
+
+class LFPluginCollWrapper:
+    def __init__(self, lfplugin: "LFPlugin") -> None:
+        self.lfplugin = lfplugin
+        self._collected_at_least_one_failure = False
+
+    @hookimpl(hookwrapper=True)
+    def pytest_make_collect_report(self, collector: nodes.Collector):
+        if isinstance(collector, (Session, Package)):
+            out = yield
+            res: CollectReport = out.get_result()
+
+            # Sort any lf-paths to the beginning.
+            lf_paths = self.lfplugin._last_failed_paths
+
+            # Use stable sort to prioritize last failed.
+            def sort_key(node: Union[nodes.Item, nodes.Collector]) -> bool:
+                # Package.path is the __init__.py file, we need the directory.
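+                # True sorts ahead of False once reverse=True is applied below.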
+ if isinstance(node, Package): + path = node.path.parent + else: + path = node.path + return path in lf_paths + + res.result = sorted( + res.result, + key=sort_key, + reverse=True, + ) + return + + elif isinstance(collector, File): + if collector.path in self.lfplugin._last_failed_paths: + out = yield + res = out.get_result() + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. + if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. + or isinstance(x, nodes.Collector) + ] + return + yield + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: "LFPlugin") -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Optional[CollectReport]: + # Packages are Files, but we only want to skip test-bearing Files, + # so don't filter Packages. + if isinstance(collector, File) and not isinstance(collector, Package): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: Dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: Optional[int] = None + self._report_status: Optional[str] = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> Set[Path]: + """Return a set with all Paths of the previously failed nodeids and + their parents.""" + rootpath = self.config.rootpath + result = set() + for nodeid in self.lastfailed: + path = rootpath / nodeid.split("::")[0] + result.add(path) + result.update(path.parents) + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.active and self.config.getoption("verbose") >= 0: + return "run-last-failure: %s" % self._report_status + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> Generator[None, 
None, None]: + yield + + if not self.active: + return + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. + self._report_status = "%d known failures not in selected tests" % ( + len(self.lastfailed), + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += " (skipped {files} {files_noun})".format( + files=self._skipped_files, files_noun=files_noun + ) + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, items: List[nodes.Item] + ) -> Generator[None, None, None]: + yield + + if self.active: + new_items: Dict[str, nodes.Item] = {} + other_items: Dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> List[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) # type: ignore[no-any-return] + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="Rerun only the tests that failed " + "at the last run (or all if 
none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="Run all tests, but run the last failures first. " + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="Run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "Show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="Remove all cache contents at start of test run", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="With ``--lf``, determines whether to execute tests when there " + "are no previously (known) failures or when no " + "cached ``lastfailed`` data was found. " + "``all`` (the default) runs the full test suite again. " + "``none`` just emits a message about no known failures and exits successfully.", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.cacheshow and not config.option.help: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> Optional[str]: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", "cache values for %r" % glob) + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line("%s contains unreadable content, will be ignored" % key) + else: + tw.line("%s contains:" % key) + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", "cache directories for %r" % glob) + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size:d}") + return 0 diff --git a/venv/lib/python3.12/site-packages/_pytest/capture.py b/venv/lib/python3.12/site-packages/_pytest/capture.py new file mode 100644 index 0000000000..a8ca0869f3 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/capture.py @@ -0,0 +1,1082 @@ +"""Per-test stdout/stderr capturing mechanism.""" +import abc +import collections +import contextlib +import io +import os +import sys +from io import UnsupportedOperation +from tempfile import TemporaryFile +from types import TracebackType +from typing import Any +from typing import AnyStr +from typing import BinaryIO +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import List +from typing import NamedTuple +from typing import Optional +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from _pytest.compat import final +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item + +if TYPE_CHECKING: + from typing_extensions import Final + from typing_extensions import Literal + + _CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="Per-test capturing method: one of fd|sys|no|tee-sys", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="Shortcut for --capture=no", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. 
+ """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable] + return + + buffered = hasattr(stream.buffer, "raw") + raw_stdout = stream.buffer.raw if buffered else stream.buffer # type: ignore[attr-defined] + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined] + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(hookwrapper=True) +def pytest_load_initial_conftests(early_config: Config): + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + outcome = yield + capman.suspend_global_capture() + if outcome.excinfo is not None: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. 
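+        # Illustrative: a buffer opened in mode "rb+" is reported as "r+",
+        # "wb" as "w" -- only the binary marker is stripped.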
+ return self.buffer.mode.replace("b", "") + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput(TextIO): + @property + def encoding(self) -> str: + return sys.__stdin__.encoding + + def read(self, size: int = -1) -> str: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + + def __next__(self) -> str: + return self.readline() + + def readlines(self, hint: Optional[int] = -1) -> List[str]: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + def __iter__(self) -> Iterator[str]: + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def flush(self) -> None: + raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + def readable(self) -> bool: + return False + + def seek(self, offset: int, whence: int = 0) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)") + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()") + + def truncate(self, size: Optional[int] = None) -> int: + raise UnsupportedOperation("cannot truncate stdin") + + def write(self, data: str) -> int: + raise UnsupportedOperation("cannot write to stdin") + + def writelines(self, lines: Iterable[str]) -> None: + raise UnsupportedOperation("Cannot write to stdin") + + def writable(self) -> bool: + return False + + def __enter__(self) -> "DontReadFromInput": + return self + + def __exit__( + self, + type: Optional[Type[BaseException]], + value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + pass + + @property + def buffer(self) -> BinaryIO: + # The str/bytes doesn't actually matter in this type, so OK to fake. + return self # type: ignore[return-value] + + +# Capture classes. 
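+#
+# Each capture class below is a small state machine:
+# initialized -> started -> (suspended <-> started) -> done.
+# A minimal usage sketch (illustrative only; tests normally rely on the
+# capsys/capfd fixtures instead of instantiating these directly):
+#
+#     cap = SysCapture(1)   # capture fd 1's sys-level stream, sys.stdout
+#     cap.start()
+#     print("hello")
+#     assert cap.snap() == "hello\n"
+#     cap.done()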
+ + +class CaptureBase(abc.ABC, Generic[AnyStr]): + EMPTY_BUFFER: AnyStr + + @abc.abstractmethod + def __init__(self, fd: int) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def done(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def suspend(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def resume(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def writeorg(self, data: AnyStr) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def snap(self) -> AnyStr: + raise NotImplementedError() + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture(CaptureBase[str]): + EMPTY_BUFFER = "" + + def __init__(self, fd: int) -> None: + pass + + def start(self) -> None: + pass + + def done(self) -> None: + pass + + def suspend(self) -> None: + pass + + def resume(self) -> None: + pass + + def snap(self) -> str: + return "" + + def writeorg(self, data: str) -> None: + pass + + +class SysCaptureBase(CaptureBase[AnyStr]): + def __init__( + self, fd: int, tmpfile: Optional[TextIO] = None, *, tee: bool = False + ) -> None: + name = patchsysdict[fd] + self._old: TextIO = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + +class SysCaptureBinary(SysCaptureBase[bytes]): + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBase[str]): + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + assert isinstance(self.tmpfile, CaptureIO) + res = 
self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBase(CaptureBase[AnyStr]): + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: Optional[int] = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull, encoding="utf-8") + self.syscapture: CaptureBase[str] = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture(targetfd) + + self._state = "initialized" + + def __repr__(self) -> str: + return "<{} {} oldfd={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.targetfd, + self.targetfd_save, + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: Tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + +class FDCaptureBinary(FDCaptureBase[bytes]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. 
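+
+    Example (an illustrative sketch; real code normally uses the
+    ``capfdbinary`` fixture rather than this low-level class)::
+
+        import os
+
+        cap = FDCaptureBinary(1)
+        cap.start()
+        os.write(1, b"hello")
+        assert cap.snap() == b"hello"
+        cap.done()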
+    """
+
+    EMPTY_BUFFER = b""
+
+    def snap(self) -> bytes:
+        self._assert_state("snap", ("started", "suspended"))
+        self.tmpfile.seek(0)
+        res = self.tmpfile.buffer.read()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
+        return res
+
+    def writeorg(self, data: bytes) -> None:
+        """Write to original file descriptor."""
+        self._assert_state("writeorg", ("started", "suspended"))
+        os.write(self.targetfd_save, data)
+
+
+class FDCapture(FDCaptureBase[str]):
+    """Capture IO to/from a given OS-level file descriptor.
+
+    snap() produces text.
+    """
+
+    EMPTY_BUFFER = ""
+
+    def snap(self) -> str:
+        self._assert_state("snap", ("started", "suspended"))
+        self.tmpfile.seek(0)
+        res = self.tmpfile.read()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
+        return res
+
+    def writeorg(self, data: str) -> None:
+        """Write to original file descriptor."""
+        self._assert_state("writeorg", ("started", "suspended"))
+        # XXX use encoding of original stream
+        os.write(self.targetfd_save, data.encode("utf-8"))
+
+
+# MultiCapture
+
+
+# Generic NamedTuple only supported since Python 3.11.
+if sys.version_info >= (3, 11) or TYPE_CHECKING:
+
+    @final
+    class CaptureResult(NamedTuple, Generic[AnyStr]):
+        """The result of :meth:`CaptureFixture.readouterr`."""
+
+        out: AnyStr
+        err: AnyStr
+
+else:
+
+    class CaptureResult(
+        collections.namedtuple("CaptureResult", ["out", "err"]), Generic[AnyStr]
+    ):
+        """The result of :meth:`CaptureFixture.readouterr`."""
+
+        __slots__ = ()
+
+
+class MultiCapture(Generic[AnyStr]):
+    _state = None
+    _in_suspended = False
+
+    def __init__(
+        self,
+        in_: Optional[CaptureBase[AnyStr]],
+        out: Optional[CaptureBase[AnyStr]],
+        err: Optional[CaptureBase[AnyStr]],
+    ) -> None:
+        self.in_: Optional[CaptureBase[AnyStr]] = in_
+        self.out: Optional[CaptureBase[AnyStr]] = out
+        self.err: Optional[CaptureBase[AnyStr]] = err
+
+    def __repr__(self) -> str:
+        return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
+            self.out,
+            self.err,
+            self.in_,
+            self._state,
+            self._in_suspended,
+        )
+
+    def start_capturing(self) -> None:
+        self._state = "started"
+        if self.in_:
+            self.in_.start()
+        if self.out:
+            self.out.start()
+        if self.err:
+            self.err.start()
+
+    def pop_outerr_to_orig(self) -> Tuple[AnyStr, AnyStr]:
+        """Pop current snapshot out/err capture and flush to orig streams."""
+        out, err = self.readouterr()
+        if out:
+            assert self.out is not None
+            self.out.writeorg(out)
+        if err:
+            assert self.err is not None
+            self.err.writeorg(err)
+        return out, err
+
+    def suspend_capturing(self, in_: bool = False) -> None:
+        self._state = "suspended"
+        if self.out:
+            self.out.suspend()
+        if self.err:
+            self.err.suspend()
+        if in_ and self.in_:
+            self.in_.suspend()
+            self._in_suspended = True
+
+    def resume_capturing(self) -> None:
+        self._state = "started"
+        if self.out:
+            self.out.resume()
+        if self.err:
+            self.err.resume()
+        if self._in_suspended:
+            assert self.in_ is not None
+            self.in_.resume()
+            self._in_suspended = False
+
+    def stop_capturing(self) -> None:
+        """Stop capturing and reset capturing streams."""
+        if self._state == "stopped":
+            raise ValueError("was already stopped")
+        self._state = "stopped"
+        if self.out:
+            self.out.done()
+        if self.err:
+            self.err.done()
+        if self.in_:
+            self.in_.done()
+
+    def is_started(self) -> bool:
+        """Whether actively capturing -- not suspended or stopped."""
+        return self._state == "started"
+
+    def readouterr(self) -> CaptureResult[AnyStr]:
+        out = self.out.snap() if self.out else ""
+        err = self.err.snap() if self.err else ""
+        # TODO: This type error is real, need to fix.
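+        # (The "" fallbacks above are ``str`` even when ``AnyStr`` is bytes,
+        # which is what the type-ignore on the return below acknowledges.)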
+        return CaptureResult(out, err)  # type: ignore[arg-type]
+
+
+def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]:
+    if method == "fd":
+        return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2))
+    elif method == "sys":
+        return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2))
+    elif method == "no":
+        return MultiCapture(in_=None, out=None, err=None)
+    elif method == "tee-sys":
+        return MultiCapture(
+            in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
+        )
+    raise ValueError(f"unknown capturing method: {method!r}")
+
+
+# CaptureManager and CaptureFixture
+
+
+class CaptureManager:
+    """The capture plugin.
+
+    Ensures that the appropriate capture method is enabled/disabled during
+    collection and each test phase (setup, call, teardown). After each of
+    those points, the captured output is obtained and attached to the
+    collection/runtest report.
+
+    There are two levels of capture:
+
+    * global: enabled by default and can be suppressed by the ``-s``
+      option. This is always enabled/disabled during collection and each test
+      phase.
+
+    * fixture: when a test function or one of its fixtures depends on the
+      ``capsys`` or ``capfd`` fixtures. In this case special handling is
+      needed to ensure the fixtures take precedence over the global capture.
+    """
+
+    def __init__(self, method: "_CaptureMethod") -> None:
+        self._method: Final = method
+        self._global_capturing: Optional[MultiCapture[str]] = None
+        self._capture_fixture: Optional[CaptureFixture[Any]] = None
+
+    def __repr__(self) -> str:
+        return "<CaptureManager _method={!r} _global_capturing={!r} _capture_fixture={!r}>".format(
+            self._method, self._global_capturing, self._capture_fixture
+        )
+
+    def is_capturing(self) -> Union[str, bool]:
+        if self.is_globally_capturing():
+            return "global"
+        if self._capture_fixture:
+            return "fixture %s" % self._capture_fixture.request.fixturename
+        return False
+
+    # Global capturing control
+
+    def is_globally_capturing(self) -> bool:
+        return self._method != "no"
+
+    def start_global_capturing(self) -> None:
+        assert self._global_capturing is None
+        self._global_capturing = _get_multicapture(self._method)
+        self._global_capturing.start_capturing()
+
+    def stop_global_capturing(self) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.pop_outerr_to_orig()
+            self._global_capturing.stop_capturing()
+            self._global_capturing = None
+
+    def resume_global_capture(self) -> None:
+        # During teardown of the python process, and on rare occasions, capture
+        # attributes can be `None` while trying to resume global capture.
+        if self._global_capturing is not None:
+            self._global_capturing.resume_capturing()
+
+    def suspend_global_capture(self, in_: bool = False) -> None:
+        if self._global_capturing is not None:
+            self._global_capturing.suspend_capturing(in_=in_)
+
+    def suspend(self, in_: bool = False) -> None:
+        # Need to undo local capsys-et-al if it exists before disabling global capture.
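+        # The fixture capture (capsys/capfd) is nested inside the global
+        # capture, so the inner fixture layer must be unwound first and only
+        # then the global layer.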
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: "CaptureFixture[Any]") -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + "cannot use {} and {} at the same time".format( + requested_fixture, current_fixture + ) + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None, None, None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None, None, None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(hookwrapper=True) + def pytest_make_collect_report(self, collector: Collector): + if isinstance(collector, File): + self.resume_global_capture() + outcome = yield + self.suspend_global_capture() + out, err = self.read_global_capture() + rep = outcome.get_result() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("setup", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("call", item): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None, None, None]: + with self.item_capture("teardown", item): + yield + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the 
:fixture:`capsys`, :fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, + captureclass: Type[CaptureBase[AnyStr]], + request: SubRequest, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.captureclass: Type[CaptureBase[AnyStr]] = captureclass + self.request = request + self._capture: Optional[MultiCapture[AnyStr]] = None + self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER + self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, + out=self.captureclass(1), + err=self.captureclass(2), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None, None, None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager: CaptureManager = self.request.config.pluginmanager.getplugin( + "capturemanager" + ) + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str], None, None]: + r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] `. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]: + r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. 
+    ``out`` and ``err`` will be ``bytes`` objects.
+
+    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_output(capsysbinary):
+            print("hello")
+            captured = capsysbinary.readouterr()
+            assert captured.out == b"hello\n"
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(SysCaptureBinary, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
+
+
+@fixture
+def capfd(request: SubRequest) -> Generator[CaptureFixture[str], None, None]:
+    r"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfd.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``text`` objects.
+
+    Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfd):
+            os.system('echo "hello"')
+            captured = capfd.readouterr()
+            assert captured.out == "hello\n"
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(FDCapture, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
+
+
+@fixture
+def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes], None, None]:
+    r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfdbinary.readouterr()``
+    method calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``bytes`` objects.
+
+    Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfdbinary):
+            os.system('echo "hello"')
+            captured = capfdbinary.readouterr()
+            assert captured.out == b"hello\n"
+
+    """
+    capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager")
+    capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True)
+    capman.set_fixture(capture_fixture)
+    capture_fixture._start()
+    yield capture_fixture
+    capture_fixture.close()
+    capman.unset_fixture()
diff --git a/venv/lib/python3.12/site-packages/_pytest/compat.py b/venv/lib/python3.12/site-packages/_pytest/compat.py
new file mode 100644
index 0000000000..1d0add7363
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/compat.py
@@ -0,0 +1,435 @@
+"""Python version compatibility code."""
+from __future__ import annotations
+
+import dataclasses
+import enum
+import functools
+import inspect
+import os
+import sys
+from inspect import Parameter
+from inspect import signature
+from pathlib import Path
+from typing import Any
+from typing import Callable
+from typing import Generic
+from typing import NoReturn
+from typing import TYPE_CHECKING
+from typing import TypeVar
+
+import py
+
+# fmt: off
+# Workaround for https://github.com/sphinx-doc/sphinx/issues/10351.
+# If `overload` is imported from `compat` instead of from `typing`,
+# Sphinx doesn't recognize it as `overload` and the API docs for
+# overloaded functions look good again. But type checkers handle
+# it fine.
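+#
+# A consumer module would then write, illustratively:
+#
+#     from _pytest.compat import overload
+#
+#     @overload
+#     def utf8(val: bytes) -> str: ...
+#     @overload
+#     def utf8(val: str) -> str: ...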
+# fmt: on +if True: + from typing import overload as overload + +if TYPE_CHECKING: + from typing_extensions import Final + + +_T = TypeVar("_T") +_S = TypeVar("_S") + +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. local +# fmt: on + + +def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: Final = NotSetType.token # noqa: E305 +# fmt: on + +if sys.version_info >= (3, 8): + import importlib.metadata + + importlib_metadata = importlib.metadata +else: + import importlib_metadata as importlib_metadata # noqa: F401 + + +def _format_args(func: Callable[..., Any]) -> str: + return str(signature(func)) + + +def is_generator(func: object) -> bool: + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin couroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def getlocation(function, curdir: str | None = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return "%s:%d" % (relfn, lineno + 1) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., Any], + *, + name: str = "", + is_method: bool = False, + cls: type | None = None, +) -> tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. 
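+
+    For example (an illustrative sketch; ``C`` is hypothetical)::
+
+        class C:
+            def f(self, x, y=0, *, z):
+                pass
+
+        getfuncargnames(C.f, is_method=True)  # -> ("x", "z")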
+ + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. + try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if is_method or ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def _translate_non_printable(s: str) -> str: + return s.translate(_non_printable_ascii_translate_table) + + +STRING_TYPES = bytes, str + + +def _bytes_to_ascii(val: bytes) -> str: + return val.decode("ascii", "backslashreplace") + + +def ascii_escaped(val: bytes | str) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes unicode objects into a sequence of escaped unicode + ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = _bytes_to_ascii(val) + else: + ret = val.encode("unicode_escape").decode("ascii") + return _translate_non_printable(ret) + + +@dataclasses.dataclass +class _PytestWrapper: + """Dummy wrapper around a function object for internal use only. 
+ + Used to correctly unwrap the underlying function object when we are + creating fixtures, because we wrap the function object ourselves with a + decorator to issue warnings when the fixture function is called directly. + """ + + obj: Any + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial.""" + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + from _pytest._io.saferepr import saferepr + + raise ValueError( + ("could not find real function of {start}\nstopped at {current}").format( + start=saferepr(start_obj), current=saferepr(obj) + ) + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """Attempt to obtain the real function object that might be wrapping + ``obj``, while at the same time returning a bound method to ``holder`` if + the original object was a bound method.""" + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: # pragma: no cover + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +if TYPE_CHECKING: + if sys.version_info >= (3, 8): + from typing import final as final + else: + from typing_extensions import final as final +elif sys.version_info >= (3, 8): + from typing import final as final +else: + + def final(f): + return f + + +if sys.version_info >= (3, 8): + from functools import cached_property as cached_property +else: + + class cached_property(Generic[_S, _T]): + __slots__ = ("func", "__doc__") + + def __init__(self, func: Callable[[_S], _T]) -> None: + self.func = func + self.__doc__ = func.__doc__ + + @overload + def __get__( + self, instance: None, owner: type[_S] | None = ... + ) -> cached_property[_S, _T]: + ... + + @overload + def __get__(self, instance: _S, owner: type[_S] | None = ...) -> _T: + ... + + def __get__(self, instance, owner=None): + if instance is None: + return self + value = instance.__dict__[self.func.__name__] = self.func(instance) + return value + + +def get_user_id() -> int | None: + """Return the current process's real user id or None if it could not be + determined. 
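+    On POSIX systems this is effectively ``os.getuid()`` with a ``-1`` result
+    mapped to ``None``; on Windows and Emscripten it is always ``None``.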
+ + :return: The user id or None if it could not be determined. + """ + # mypy follows the version and platform checking expectation of PEP 484: + # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks + # Containment checks are too complex for mypy v1.5.0 and cause failure. + if sys.platform == "win32" or sys.platform == "emscripten": + # win32 does not have a getuid() function. + # Emscripten has a return 0 stub. + return None + else: + # On other platforms, a return value of -1 is assumed to indicate that + # the current process's real user id could not be determined. + ERROR = -1 + uid = os.getuid() + return uid if uid != ERROR else None + + +# Perform exhaustiveness checking. +# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int { +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember ourselves to go and update the handle +# function to handle the new variant. +# +# With `assert_never` we can do better: +# +# // raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also work for Enums (if you use `is` to compare) and Literals. +def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" diff --git a/venv/lib/python3.12/site-packages/_pytest/config/__init__.py b/venv/lib/python3.12/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000000..e3990d175d --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1816 @@ +"""Command line options, ini-file and conftest.py processing.""" +import argparse +import collections.abc +import copy +import dataclasses +import enum +import glob +import inspect +import os +import re +import shlex +import sys +import types +import warnings +from functools import lru_cache +from pathlib import Path +from textwrap import dedent +from types import FunctionType +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from pluggy import HookimplMarker +from pluggy import HookspecMarker +from pluggy import PluginManager + +import _pytest._code +import _pytest.deprecated +import _pytest.hookspec +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.compat import importlib_metadata # type: ignore[attr-defined] +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path 
+from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.pathlib import safe_exists +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import warn_explicit_for + +if TYPE_CHECKING: + from _pytest._code.code import _TracebackStyle + from _pytest.terminal import TerminalReporter + from .argparsing import Argument + + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. + INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: Path, + excinfo: Tuple[Type[Exception], Exception, TracebackType], + ) -> None: + super().__init__(path, excinfo) + self.path = path + self.excinfo = excinfo + + def __str__(self) -> str: + return "{}: {} (from {})".format( + self.excinfo[0].__name__, self.excinfo[1], self.path + ) + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> Union[int, ExitCode]: + """Perform an in-process test run. + + :param args: + List of command line arguments. If `None` or not given, defaults to reading + arguments directly from the process command line (:data:`sys.argv`). + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + try: + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo.from_exc_info(e.excinfo) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main( + config=config + ) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + + +def console_main() -> int: + """The CLI entry point of pytest. 
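+
+    It is what the ``pytest`` console script invokes; in packaging terms,
+    roughly (illustrative)::
+
+        [options.entry_points]
+        console_scripts =
+            pytest = pytest:console_main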
+ + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. + """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = essential_plugins + ( + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "nose", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", + "python_path", + *(["unraisableexception", "threadexception"] if sys.version_info >= (3, 8) else []), + "faulthandler", +) + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") +builtin_plugins.add("pytester_assertions") + + +def get_config( + args: Optional[List[str]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> "PytestPluginManager": + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. 
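+
+    A minimal sketch of such an integration (the plugin class is
+    hypothetical):
+
+    .. code-block:: python
+
+        import pytest
+
+        class MyPlugin:
+            def pytest_sessionfinish(self):
+                print("test session finished")
+
+        pytest.main(["-q"], plugins=[MyPlugin()])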
+ """ + return get_config().pluginmanager + + +def _prepareconfig( + args: Optional[Union[List[str], "os.PathLike[str]"]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": + if args is None: + args = sys.argv[1:] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = ( # type:ignore[unreachable] + "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + ) + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +def _get_directory(path: Path) -> Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +def _get_legacy_hook_marks( + method: Any, + hook_type: str, + opt_names: Tuple[str, ...], +) -> Dict[str, bool]: + if TYPE_CHECKING: + # abuse typeguard from importlib to avoid massive method type union thats lacking a alias + assert inspect.isroutine(method) + known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: list[str] = [] + opts: dict[str, bool] = {} + for opt_name in opt_names: + opt_attr = getattr(method, opt_name, AttributeError) + if opt_attr is not AttributeError: + must_warn.append(f"{opt_name}={opt_attr}") + opts[opt_name] = True + elif opt_name in known_marks: + must_warn.append(f"{opt_name}=True") + opts[opt_name] = True + else: + opts[opt_name] = False + if must_warn: + hook_opts = ", ".join(must_warn) + message = _pytest.deprecated.HOOK_LEGACY_MARKING.format( + type=hook_type, + fullname=method.__qualname__, + hook_opts=hook_opts, + ) + warn_explicit_for(cast(FunctionType, method), message) + return opts + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager ` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + import _pytest.assertion + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: Set[types.ModuleType] = set() + # All conftest modules applicable for a directory. + # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: Dict[Path, List[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: Optional[Path] = None + # If set, conftest loading is skipped. + self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. 
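+        # (With lru_cache(256), repeated lookups for the same path become
+        # cache hits instead of fresh is_file()/stat() calls.)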
+ self._get_directory = lru_cache(256)(_get_directory) + + self._duplicatepaths: Set[Path] = set() + + # plugins that were explicitly skipped with pytest.skip + # list of (module name, skip reason) + # previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: List[Tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return None + # Ignore names which can not be hooks. + if name == "pytest_plugins": + return None + + opts = super().parse_hookimpl_opts(plugin, name) + if opts is not None: + return opts + + method = getattr(plugin, name) + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return None + # Collect unmarked hooks as long as they have the `pytest_' prefix. + return _get_legacy_hook_marks( # type: ignore[return-value] + method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") + ) + + def parse_hookspec_opts(self, module_or_class, name: str): + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = _get_legacy_hook_marks( # type: ignore[assignment] + method, + "spec", + ("firstresult", "historic"), + ) + return opts + + def register( + self, plugin: _PluggyPlugin, name: Optional[str] = None + ) -> Optional[str]: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + ret: Optional[str] = super().register(plugin, name) + if ret: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict(plugin=plugin, manager=self) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return ret + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. + plugin: Optional[_PluggyPlugin] = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: "Config") -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible. 
" + "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible. " + "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, + args: Sequence[Union[str, Path]], + pyargs: bool, + noconftest: bool, + rootpath: Path, + confcutdir: Optional[Path], + importmode: Union[ImportMode, str], + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + current = Path.cwd() + self._confcutdir = absolutepath(current / confcutdir) if confcutdir else None + self._noconftest = noconftest + self._using_pyargs = pyargs + foundanchor = False + for intitial_path in args: + path = str(intitial_path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(current / path) + + # Ensure we do not break if what appears to be an anchor + # is in fact a very long option (#10169, #11394). + if safe_exists(anchor): + self._try_load_conftest(anchor, importmode, rootpath) + foundanchor = True + if not foundanchor: + self._try_load_conftest(current, importmode, rootpath) + + def _is_in_confcutdir(self, path: Path) -> bool: + """Whether a path is within the confcutdir. + + When false, should not load conftest. + """ + if self._confcutdir is None: + return True + return path not in self._confcutdir.parents + + def _try_load_conftest( + self, anchor: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> None: + self._getconftestmodules(anchor, importmode, rootpath) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._getconftestmodules(x, importmode, rootpath) + + def _getconftestmodules( + self, path: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> Sequence[types.ModuleType]: + if self._noconftest: + return [] + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + existing_clist = self._dirpath2confmods.get(directory) + if existing_clist is not None: + return existing_clist + + # XXX these days we may rather want to use config.rootpath + # and allow users to opt into looking into the rootdir parent + # directories instead of requiring to specify confcutdir. 
+ clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest(conftestpath, importmode, rootpath) + clist.append(mod) + self._dirpath2confmods[directory] = clist + return clist + + def _rget_with_confmod( + self, + name: str, + path: Path, + importmode: Union[str, ImportMode], + rootpath: Path, + ) -> Tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path, importmode, rootpath=rootpath) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, conftestpath: Path, importmode: Union[str, ImportMode], rootpath: Path + ) -> types.ModuleType: + existing = self.get_plugin(str(conftestpath)) + if existing is not None: + return cast(types.ModuleType, existing) + + pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + _ensure_removed_sysmodule(conftestpath.stem) + + try: + mod = import_path(conftestpath, mode=importmode, root=rootpath) + except Exception as e: + assert e.__traceback__ is not None + exc_info = (type(e), e, e.__traceback__) + raise ConftestImportFailure(conftestpath, exc_info) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + assert mod not in mods + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod) + return mod + + def _check_non_top_pytest_plugins( + self, + mod: types.ModuleType, + conftestpath: Path, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + """:meta private:""" + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + parg = parg.strip() + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError("plugin %s cannot be disabled" % name) + + # PR #4304: remove stepwise if cacheprovider is blocked. + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. None indicates that it has been blocked. + # There is no interface with pluggy for this. 
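+            # For example (illustrative sequence): after `-p no:cacheprovider`
+            # pluggy stores {"cacheprovider": None}; a later `-p cacheprovider`
+            # must drop that None entry before import_plugin() can register it.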
+ if self._name2plugin.get(name, -1) is None: + del self._name2plugin[name] + if not name.startswith("pytest_"): + if self._name2plugin.get("pytest_" + name, -1) is None: + del self._name2plugin["pytest_" + name] + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest(self, conftestmodule: types.ModuleType) -> None: + """:meta private:""" + self.register(conftestmodule, name=conftestmodule.__file__) + + def consider_env(self) -> None: + """:meta private:""" + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod: types.ModuleType) -> None: + """:meta private:""" + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs( + self, spec: Union[None, types.ModuleType, str, Sequence[str]] + ) -> None: + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: + """Import a plugin with ``modname``. + + If ``consider_entry_points`` is True, entry point names are also + considered to find a plugin. + """ + # Most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance(modname, str), ( + "module name as text required, got %r" % modname + ) + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + raise ImportError( + f'Error importing plugin "{modname}": {e.args[0]}' + ).with_traceback(e.__traceback__) from e + + except Skipped as e: + self.skipped_plugins.append((modname, e.msg or "")) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list( + specs: Union[None, types.ModuleType, str, Sequence[str]] +) -> List[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): + return list(specs) + raise UsageError( + "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r" + % specs + ) + + +def _ensure_removed_sysmodule(modname: str) -> None: + try: + del sys.modules[modname] + except KeyError: + pass + + +class Notset: + def __repr__(self): + return "" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: + """Given an iterable of file names in a source distribution, return the "names" that should + be marked for assertion rewrite. + + For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in + the assertion rewrite mechanism. + + This function has to deal with dist-info based distributions and egg based distributions + (which are still very much in use for "editable" installs). 
+ + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + # as well as editable installation finder modules made by setuptools + if module_name != "setup" and not module_name.startswith("__editable__"): + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @dataclasses.dataclass(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: Tuple[str, ...] + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] + """Extra plugins, might be `None`.""" + dir: Path + """The directory from which :func:`pytest.main` was invoked.""" + + def __init__( + self, + *, + args: Iterable[str], + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]], + dir: Path, + ) -> None: + object.__setattr__(self, "args", tuple(args)) + object.__setattr__(self, "plugins", plugins) + object.__setattr__(self, "dir", dir) + + class ArgsSource(enum.Enum): + """Indicates the source of the test arguments. + + .. versionadded:: 7.2 + """ + + #: Command line arguments. + ARGS = enum.auto() + #: Invocation directory. + INCOVATION_DIR = enum.auto() + #: 'testpaths' configuration value. 
+ TESTPATHS = enum.auto() + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: Optional[InvocationParams] = None, + ) -> None: + from .argparsing import Parser, FILE_OR_DIR + + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line option as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. + + :type: InvocationParams + """ + + _a = FILE_OR_DIR + self._parser = Parser( + usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + processopt=self._processopt, + _ispytest=True, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + from .compat import PathAwareHookProxy + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook = PathAwareHookProxy(self.pluginmanager.hook) + self._inicache: Dict[str, Any] = {} + self._override_ini: Sequence[str] = () + self._opt2dest: Dict[str, str] = {} + self._cleanup: List[Callable[[], None]] = [] + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + self.args_source = Config.ArgsSource.ARGS + self.args: List[str] = [] + + if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + + self.cache: Optional[Cache] = None + + @property + def rootpath(self) -> Path: + """The path to the :ref:`rootdir `. + + :type: pathlib.Path + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def inipath(self) -> Optional[Path]: + """The path to the :ref:`configfile `. + + :type: Optional[pathlib.Path] + + .. versionadded:: 6.1 + """ + return self._inipath + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coinciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: Optional[TerminalReporter] = self.pluginmanager.get_plugin( + "terminalreporter" + ) + assert terminalreporter is not None + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: List[str] + ) -> "Config": + try: + self.parse(args) + except UsageError: + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors. 
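+            # For example (illustrative invocation): `pytest --version --bad-flag`
+            # still prints the version below before the UsageError propagates.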
+            if getattr(self.option, "version", False) or "--version" in args:
+                from _pytest.helpconfig import showversion
+
+                showversion(self)
+            elif (
+                getattr(self.option, "help", False) or "--help" in args or "-h" in args
+            ):
+                self._parser._getparser().print_help()
+                sys.stdout.write(
+                    "\nNOTE: displaying only minimal help due to UsageError.\n\n"
+                )
+
+            raise
+
+        return self
+
+    def notify_exception(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        option: Optional[argparse.Namespace] = None,
+    ) -> None:
+        if option and getattr(option, "fulltrace", False):
+            style: _TracebackStyle = "long"
+        else:
+            style = "native"
+        excrepr = excinfo.getrepr(
+            funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
+        )
+        res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
+        if not any(res):
+            for line in str(excrepr).split("\n"):
+                sys.stderr.write("INTERNALERROR> %s\n" % line)
+            sys.stderr.flush()
+
+    def cwd_relative_nodeid(self, nodeid: str) -> str:
+        # Nodeids are relative to the rootpath; compute them relative to the cwd.
+        if self.invocation_params.dir != self.rootpath:
+            fullpath = self.rootpath / nodeid
+            nodeid = bestrelpath(self.invocation_params.dir, fullpath)
+        return nodeid
+
+    @classmethod
+    def fromdictargs(cls, option_dict, args) -> "Config":
+        """Constructor usable for subprocesses."""
+        config = get_config(args)
+        config.option.__dict__.update(option_dict)
+        config.parse(args, addopts=False)
+        for x in config.option.plugins:
+            config.pluginmanager.consider_pluginarg(x)
+        return config
+
+    def _processopt(self, opt: "Argument") -> None:
+        for name in opt._short_opts + opt._long_opts:
+            self._opt2dest[name] = opt.dest
+
+        if hasattr(opt, "default"):
+            if not hasattr(self.option, opt.dest):
+                setattr(self.option, opt.dest, opt.default)
+
+    @hookimpl(trylast=True)
+    def pytest_load_initial_conftests(self, early_config: "Config") -> None:
+        # We haven't fully parsed the command line arguments yet, so
+        # early_config.args is not set yet. But we need it for
+        # discovering the initial conftests. So "pre-run" the logic here.
+        # It will be done for real in `parse()`.
+ args, args_source = early_config._decide_args( + args=early_config.known_args_namespace.file_or_dir, + pyargs=early_config.known_args_namespace.pyargs, + testpaths=early_config.getini("testpaths"), + invocation_dir=early_config.invocation_params.dir, + rootpath=early_config.rootpath, + warn=False, + ) + self.pluginmanager._set_initial_conftests( + args=args, + pyargs=early_config.known_args_namespace.pyargs, + noconftest=early_config.known_args_namespace.noconftest, + rootpath=early_config.rootpath, + confcutdir=early_config.known_args_namespace.confcutdir, + importmode=early_config.known_args_namespace.importmode, + ) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + ns.inifilename, + ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + config=self, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from setuptools entry points, no need to continue. + return + + package_files = ( + str(file) + for dist in importlib_metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args: List[str], via: str) -> List[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _decide_args( + self, + *, + args: List[str], + pyargs: List[str], + testpaths: List[str], + invocation_dir: Path, + rootpath: Path, + warn: bool, + ) -> Tuple[List[str], ArgsSource]: + """Decide the args (initial paths/nodeids) to use given the relevant inputs. + + :param warn: Whether can issue warnings. 
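+
+        For example (hypothetical values): with ``args=[]``,
+        ``testpaths=["tests"]`` and the invocation directory equal to the
+        rootpath, the result is the sorted glob of ``tests`` with source
+        ``ArgsSource.TESTPATHS``; explicitly passed ``args`` always take
+        precedence.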
+ """ + if args: + source = Config.ArgsSource.ARGS + result = args + else: + if invocation_dir == rootpath: + source = Config.ArgsSource.TESTPATHS + if pyargs: + result = testpaths + else: + result = [] + for path in testpaths: + result.extend(sorted(glob.iglob(path, recursive=True))) + if testpaths and not result: + if warn: + warning_text = ( + "No files were found in testpaths; " + "consider removing or adjusting your testpaths configuration. " + "Searching recursively from the current directory instead." + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), stacklevel=3 + ) + else: + result = [] + if not result: + source = Config.ArgsSource.INCOVATION_DIR + result = [str(invocation_dir)] + return result, source + + def _preparse(self, args: List[str], addopts: bool = True) -> None: + if addopts: + env_addopts = os.environ.get("PYTEST_ADDOPTS", "") + if len(env_addopts): + args[:] = ( + self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS") + + args + ) + self._initini(args) + if addopts: + args[:] = ( + self._validate_args(self.getini("addopts"), "via addopts config") + args + ) + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.option) + ) + self._checkversion() + self._consider_importhook(args) + self.pluginmanager.consider_preparse(args, exclude_only=False) + if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # Don't autoload from setuptools entry point. Only explicitly specified + # plugins are going to be loaded. + self.pluginmanager.load_setuptools_entrypoints("pytest11") + self.pluginmanager.consider_env() + + self.known_args_namespace = self._parser.parse_known_args( + args, namespace=copy.copy(self.known_args_namespace) + ) + + self._validate_plugins() + self._warn_about_skipped_plugins() + + if self.known_args_namespace.strict: + self.issue_config_time_warning( + _pytest.deprecated.STRICT_OPTION, stacklevel=2 + ) + + if self.known_args_namespace.confcutdir is None: + if self.inipath is not None: + confcutdir = str(self.inipath.parent) + else: + confcutdir = str(self.rootpath) + self.known_args_namespace.confcutdir = confcutdir + try: + self.hook.pytest_load_initial_conftests( + early_config=self, args=args, parser=self._parser + ) + except ConftestImportFailure as e: + if self.known_args_namespace.help or self.known_args_namespace.version: + # we don't want to prevent --help/--version to work + # so just let is pass and print a warning at the end + self.issue_config_time_warning( + PytestConfigWarning(f"could not load initial conftests: {e.path}"), + stacklevel=2, + ) + else: + raise + + @hookimpl(hookwrapper=True) + def pytest_collection(self) -> Generator[None, None, None]: + # Validate invalid ini keys after collection is done so we take in account + # options added by late-loading conftest files. + yield + self._validate_config_options() + + def _checkversion(self) -> None: + import pytest + + minver = self.inicfg.get("minversion", None) + if minver: + # Imported lazily to improve start-up time. 
+                from packaging.version import Version
+
+            if not isinstance(minver, str):
+                raise pytest.UsageError(
+                    "%s: 'minversion' must be a single value" % self.inipath
+                )
+
+            if Version(minver) > Version(pytest.__version__):
+                raise pytest.UsageError(
+                    "%s: 'minversion' requires pytest-%s, actual pytest-%s"
+                    % (
+                        self.inipath,
+                        minver,
+                        pytest.__version__,
+                    )
+                )
+
+    def _validate_config_options(self) -> None:
+        for key in sorted(self._get_unknown_ini_keys()):
+            self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
+
+    def _validate_plugins(self) -> None:
+        required_plugins = sorted(self.getini("required_plugins"))
+        if not required_plugins:
+            return
+
+        # Imported lazily to improve start-up time.
+        from packaging.version import Version
+        from packaging.requirements import InvalidRequirement, Requirement
+
+        plugin_info = self.pluginmanager.list_plugin_distinfo()
+        plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}
+
+        missing_plugins = []
+        for required_plugin in required_plugins:
+            try:
+                req = Requirement(required_plugin)
+            except InvalidRequirement:
+                missing_plugins.append(required_plugin)
+                continue
+
+            if req.name not in plugin_dist_info:
+                missing_plugins.append(required_plugin)
+            elif not req.specifier.contains(
+                Version(plugin_dist_info[req.name]), prereleases=True
+            ):
+                missing_plugins.append(required_plugin)
+
+        if missing_plugins:
+            raise UsageError(
+                "Missing required plugins: {}".format(", ".join(missing_plugins)),
+            )
+
+    def _warn_or_fail_if_strict(self, message: str) -> None:
+        if self.known_args_namespace.strict_config:
+            raise UsageError(message)
+
+        self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
+
+    def _get_unknown_ini_keys(self) -> List[str]:
+        parser_inicfg = self._parser._inidict
+        return [name for name in self.inicfg if name not in parser_inicfg]
+
+    def parse(self, args: List[str], addopts: bool = True) -> None:
+        # Parse the given cmdline arguments into this config object.
+        assert (
+            self.args == []
+        ), "can only parse cmdline args at most once per Config object"
+        self.hook.pytest_addhooks.call_historic(
+            kwargs=dict(pluginmanager=self.pluginmanager)
+        )
+        self._preparse(args, addopts=addopts)
+        # XXX deprecated hook:
+        self.hook.pytest_cmdline_preparse(config=self, args=args)
+        self._parser.after_preparse = True  # type: ignore
+        try:
+            args = self._parser.parse_setoption(
+                args, self.option, namespace=self.option
+            )
+            self.args, self.args_source = self._decide_args(
+                args=args,
+                pyargs=self.known_args_namespace.pyargs,
+                testpaths=self.getini("testpaths"),
+                invocation_dir=self.invocation_params.dir,
+                rootpath=self.rootpath,
+                warn=True,
+            )
+        except PrintHelp:
+            pass
+
+    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
+        """Issue and handle a warning during the "configure" stage.
+
+        During ``pytest_configure`` we can't capture warnings using the ``catch_warnings_for_item``
+        function because it is not possible to have hookwrappers around ``pytest_configure``.
+
+        This function is mainly intended for plugins that need to issue warnings during
+        ``pytest_configure`` (or similar stages).
+
+        :param warning: The warning instance.
+        :param stacklevel: The stacklevel forwarded to warnings.warn.
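+
+        A minimal sketch of plugin usage (hypothetical warning text):
+
+        .. code-block:: python
+
+            import pytest
+
+            def pytest_configure(config):
+                config.issue_config_time_warning(
+                    pytest.PytestConfigWarning("the frobnicate option is deprecated"),
+                    stacklevel=2,
+                )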
+ """ + if self.pluginmanager.is_blocked("warnings"): + return + + cmdline_filters = self.known_args_namespace.pythonwarnings or [] + config_filters = self.getini("filterwarnings") + + with warnings.catch_warnings(record=True) as records: + warnings.simplefilter("always", type(warning)) + apply_warning_filters(config_filters, cmdline_filters) + warnings.warn(warning, stacklevel=stacklevel) + + if records: + frame = sys._getframe(stacklevel - 1) + location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + self.hook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=records[0], + when="config", + nodeid="", + location=location, + ) + ) + + def addinivalue_line(self, name: str, line: str) -> None: + """Add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes + the first line in its value.""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + + def getini(self, name: str): + """Return configuration value from an :ref:`ini file `. + + If the specified name hasn't been registered through a prior + :func:`parser.addini ` call (usually from a + plugin), a ValueError is raised. + """ + try: + return self._inicache[name] + except KeyError: + self._inicache[name] = val = self._getini(name) + return val + + # Meant for easy monkeypatching by legacypath plugin. + # Can be inlined back (with no cover removed) once legacypath is gone. + def _getini_unknown_type(self, name: str, type: str, value: Union[str, List[str]]): + msg = f"unknown configuration type: {type}" + raise ValueError(msg, value) # pragma: no cover + + def _getini(self, name: str): + try: + description, type, default = self._parser._inidict[name] + except KeyError as e: + raise ValueError(f"unknown configuration value: {name!r}") from e + override_value = self._get_override_ini_value(name) + if override_value is None: + try: + value = self.inicfg[name] + except KeyError: + if default is not None: + return default + if type is None: + return "" + return [] + else: + value = override_value + # Coerce the values based on types. + # + # Note: some coercions are only required if we are reading from .ini files, because + # the file format doesn't contain type information, but when reading from toml we will + # get either str or list of str values (see _parse_ini_config_from_pyproject_toml). + # For example: + # + # ini: + # a_line_list = "tests acceptance" + # in this case, we need to split the string to obtain a list of strings. + # + # toml: + # a_line_list = ["tests", "acceptance"] + # in this case, we already have a list ready to use. + # + if type == "paths": + # TODO: This assert is probably not valid in all cases. 
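+            # For example (hypothetical ini value): a `paths` option set to
+            # "tests doc" becomes [dp / "tests", dp / "doc"], where dp is the
+            # directory containing the ini file.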
+ assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [dp / x for x in input_values] + elif type == "args": + return shlex.split(value) if isinstance(value, str) else value + elif type == "linelist": + if isinstance(value, str): + return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] + else: + return value + elif type == "bool": + return _strtobool(str(value).strip()) + elif type == "string": + return value + elif type is None: + return value + else: + return self._getini_unknown_type(name, type, value) + + def _getconftest_pathlist( + self, name: str, path: Path, rootpath: Path + ) -> Optional[List[Path]]: + try: + mod, relroots = self.pluginmanager._rget_with_confmod( + name, path, self.getoption("importmode"), rootpath + ) + except KeyError: + return None + assert mod.__file__ is not None + modpath = Path(mod.__file__).parent + values: List[Path] = [] + for relroot in relroots: + if isinstance(relroot, os.PathLike): + relroot = Path(relroot) + else: + relroot = relroot.replace("/", os.sep) + relroot = absolutepath(modpath / relroot) + values.append(relroot) + return values + + def _get_override_ini_value(self, name: str) -> Optional[str]: + value = None + # override_ini is a list of "ini=value" options. + # Always use the last item if multiple values are set for same ini-name, + # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2. + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError as e: + raise UsageError( + "-o/--override-ini expects option=value style (got: {!r}).".format( + ini_config + ) + ) from e + else: + if key == name: + value = user_ini_value + return value + + def getoption(self, name: str, default=notset, skip: bool = False): + """Return command line option value. + + :param name: Name of the option. You may also specify + the literal ``--OPT`` option instead of the "dest" option name. + :param default: Default value if no option of that name exists. + :param skip: If True, raise pytest.skip if option does not exists + or has a None value. + """ + name = self._opt2dest.get(name, name) + try: + val = getattr(self.option, name) + if val is None and skip: + raise AttributeError(name) + return val + except AttributeError as e: + if default is not notset: + return default + if skip: + import pytest + + pytest.skip(f"no {name!r} option found") + raise ValueError(f"no option named {name!r}") from e + + def getvalue(self, name: str, path=None): + """Deprecated, use getoption() instead.""" + return self.getoption(name) + + def getvalueorskip(self, name: str, path=None): + """Deprecated, use getoption(skip=True) instead.""" + return self.getoption(name, skip=True) + + def _warn_about_missing_assertion(self, mode: str) -> None: + if not _assertion_supported(): + if mode == "plain": + warning_text = ( + "ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?" 
+ ) + else: + warning_text = ( + "assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n" + ) + self.issue_config_time_warning( + PytestConfigWarning(warning_text), + stacklevel=3, + ) + + def _warn_about_skipped_plugins(self) -> None: + for module_name, msg in self.pluginmanager.skipped_plugins: + self.issue_config_time_warning( + PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"), + stacklevel=2, + ) + + +def _assertion_supported() -> bool: + try: + assert False + except AssertionError: + return True + else: + return False # type: ignore[unreachable] + + +def create_terminal_writer( + config: Config, file: Optional[TextIO] = None +) -> TerminalWriter: + """Create a TerminalWriter instance configured according to the options + in the config object. + + Every code which requires a TerminalWriter object and has access to a + config object should use this function. + """ + tw = TerminalWriter(file=file) + + if config.option.color == "yes": + tw.hasmarkup = True + elif config.option.color == "no": + tw.hasmarkup = False + + if config.option.code_highlight == "yes": + tw.code_highlight = True + elif config.option.code_highlight == "no": + tw.code_highlight = False + + return tw + + +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + + .. note:: Copied from distutils.util. + """ + val = val.lower() + if val in ("y", "yes", "t", "true", "on", "1"): + return True + elif val in ("n", "no", "f", "false", "off", "0"): + return False + else: + raise ValueError(f"invalid truth value {val!r}") + + +@lru_cache(maxsize=50) +def parse_warning_filter( + arg: str, *, escape: bool +) -> Tuple["warnings._ActionKind", str, Type[Warning], str, int]: + """Parse a warnings filter string. + + This is copied from warnings._setoption with the following changes: + + * Does not apply the filter. + * Escaping is optional. + * Raises UsageError so we get nice error messages on failure. 
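+
+    For example (illustrative input), ``"ignore::DeprecationWarning"`` parses
+    to ``("ignore", "", DeprecationWarning, "", 0)``.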
+ """ + __tracebackhide__ = True + error_template = dedent( + f"""\ + while parsing the following warning configuration: + + {arg} + + This error occurred: + + {{error}} + """ + ) + + parts = arg.split(":") + if len(parts) > 5: + doc_url = ( + "https://docs.python.org/3/library/warnings.html#describing-warning-filters" + ) + error = dedent( + f"""\ + Too many fields ({len(parts)}), expected at most 5 separated by colons: + + action:message:category:module:line + + For more information please consult: {doc_url} + """ + ) + raise UsageError(error_template.format(error=error)) + + while len(parts) < 5: + parts.append("") + action_, message, category_, module, lineno_ = (s.strip() for s in parts) + try: + action: "warnings._ActionKind" = warnings._getaction(action_) # type: ignore[attr-defined] + except warnings._OptionError as e: + raise UsageError(error_template.format(error=str(e))) + try: + category: Type[Warning] = _resolve_warning_category(category_) + except Exception: + exc_info = ExceptionInfo.from_current() + exception_text = exc_info.getrepr(style="native") + raise UsageError(error_template.format(error=exception_text)) + if message and escape: + message = re.escape(message) + if module and escape: + module = re.escape(module) + r"\Z" + if lineno_: + try: + lineno = int(lineno_) + if lineno < 0: + raise ValueError("number is negative") + except ValueError as e: + raise UsageError( + error_template.format(error=f"invalid lineno {lineno_!r}: {e}") + ) + else: + lineno = 0 + return action, message, category, module, lineno + + +def _resolve_warning_category(category: str) -> Type[Warning]: + """ + Copied from warnings._getcategory, but changed so it lets exceptions (specially ImportErrors) + propagate so we can get access to their tracebacks (#9218). + """ + __tracebackhide__ = True + if not category: + return Warning + + if "." not in category: + import builtins as m + + klass = category + else: + module, _, klass = category.rpartition(".") + m = __import__(module, None, None, [klass]) + cat = getattr(m, klass) + if not issubclass(cat, Warning): + raise UsageError(f"{cat} is not a Warning subclass") + return cast(Type[Warning], cat) + + +def apply_warning_filters( + config_filters: Iterable[str], cmdline_filters: Iterable[str] +) -> None: + """Applies pytest-configured filters to the warnings module""" + # Filters should have this precedence: cmdline options, config. + # Filters should be applied in the inverse order of precedence. 
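+    # For example (illustrative filters): an ini `filterwarnings = error` entry
+    # is applied first and a cmdline `-W ignore::UserWarning` after it; since
+    # filterwarnings() prepends by default, the cmdline ignore wins.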
+ for arg in config_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + for arg in cmdline_filters: + warnings.filterwarnings(*parse_warning_filter(arg, escape=True)) diff --git a/venv/lib/python3.12/site-packages/_pytest/config/argparsing.py b/venv/lib/python3.12/site-packages/_pytest/config/argparsing.py new file mode 100644 index 0000000000..d3f01916b6 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/config/argparsing.py @@ -0,0 +1,551 @@ +import argparse +import os +import sys +import warnings +from gettext import gettext +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._io +from _pytest.compat import final +from _pytest.config.exceptions import UsageError +from _pytest.deprecated import ARGUMENT_PERCENT_DEFAULT +from _pytest.deprecated import ARGUMENT_TYPE_STR +from _pytest.deprecated import ARGUMENT_TYPE_STR_CHOICE +from _pytest.deprecated import check_ispytest + +if TYPE_CHECKING: + from typing_extensions import Literal + +FILE_OR_DIR = "file_or_dir" + + +@final +class Parser: + """Parser for command line arguments and ini-file values. + + :ivar extra_info: Dict of generic param -> value to display in case + there's an error processing the command line arguments. + """ + + prog: Optional[str] = None + + def __init__( + self, + usage: Optional[str] = None, + processopt: Optional[Callable[["Argument"], None]] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True) + self._groups: List[OptionGroup] = [] + self._processopt = processopt + self._usage = usage + self._inidict: Dict[str, Tuple[str, Optional[str], Any]] = {} + self._ininames: List[str] = [] + self.extra_info: Dict[str, Any] = {} + + def processoption(self, option: "Argument") -> None: + if self._processopt: + if option.dest: + self._processopt(option) + + def getgroup( + self, name: str, description: str = "", after: Optional[str] = None + ) -> "OptionGroup": + """Get (or create) a named option Group. + + :param name: Name of the option group. + :param description: Long description for --help output. + :param after: Name of another group, used for ordering --help output. + :returns: The option group. + + The returned group object has an ``addoption`` method with the same + signature as :func:`parser.addoption ` but + will be shown in the respective group in the output of + ``pytest --help``. + """ + for group in self._groups: + if group.name == name: + return group + group = OptionGroup(name, description, parser=self, _ispytest=True) + i = 0 + for i, grp in enumerate(self._groups): + if grp.name == after: + break + self._groups.insert(i + 1, group) + return group + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Register a command line option. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :py:func:`add_argument() + ` function accepts. + + After command line parsing, options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
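+
+        A short sketch of a conftest hook using it (hypothetical option name):
+
+        .. code-block:: python
+
+            def pytest_addoption(parser):
+                parser.addoption(
+                    "--runslow",
+                    action="store_true",
+                    default=False,
+                    help="also run tests marked as slow",
+                )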
+ """ + self._anonymous.addoption(*opts, **attrs) + + def parse( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + from _pytest._argcomplete import try_argcomplete + + self.optparser = self._getparser() + try_argcomplete(self.optparser) + strargs = [os.fspath(x) for x in args] + return self.optparser.parse_args(strargs, namespace=namespace) + + def _getparser(self) -> "MyOptionParser": + from _pytest._argcomplete import filescompleter + + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) + groups = self._groups + [self._anonymous] + for group in groups: + if group.options: + desc = group.description or group.name + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*") + # bash like autocompletion for dirs (appending '/') + # Type ignored because typeshed doesn't know about argcomplete. + file_or_dir_arg.completer = filescompleter # type: ignore + return optparser + + def parse_setoption( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + option: argparse.Namespace, + namespace: Optional[argparse.Namespace] = None, + ) -> List[str]: + parsedoption = self.parse(args, namespace=namespace) + for name, value in parsedoption.__dict__.items(): + setattr(option, name, value) + return cast(List[str], getattr(parsedoption, FILE_OR_DIR)) + + def parse_known_args( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Parse the known arguments at this point. + + :returns: An argparse namespace object. + """ + return self.parse_known_and_unknown_args(args, namespace=namespace)[0] + + def parse_known_and_unknown_args( + self, + args: Sequence[Union[str, "os.PathLike[str]"]], + namespace: Optional[argparse.Namespace] = None, + ) -> Tuple[argparse.Namespace, List[str]]: + """Parse the known arguments at this point, and also return the + remaining unknown arguments. + + :returns: + A tuple containing an argparse namespace object for the known + arguments, and a list of the unknown arguments. + """ + optparser = self._getparser() + strargs = [os.fspath(x) for x in args] + return optparser.parse_known_args(strargs, namespace=namespace) + + def addini( + self, + name: str, + help: str, + type: Optional[ + "Literal['string', 'paths', 'pathlist', 'args', 'linelist', 'bool']" + ] = None, + default: Any = None, + ) -> None: + """Register an ini-file option. + + :param name: + Name of the ini-variable. + :param type: + Type of the variable. Can be: + + * ``string``: a string + * ``bool``: a boolean + * ``args``: a list of strings, separated as in a shell + * ``linelist``: a list of strings, separated by line breaks + * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell + * ``pathlist``: a list of ``py.path``, separated as in a shell + + .. versionadded:: 7.0 + The ``paths`` variable type. + + Defaults to ``string`` if ``None`` or not passed. + :param default: + Default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) `. 
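+
+        A short sketch (hypothetical ini name):
+
+        .. code-block:: python
+
+            def pytest_addoption(parser):
+                parser.addini(
+                    "api_timeout", "timeout for API tests", type="string", default="10"
+                )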
+ """ + assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool") + self._inidict[name] = (help, type, default) + self._ininames.append(name) + + +class ArgumentError(Exception): + """Raised if an Argument instance is created with invalid or + inconsistent arguments.""" + + def __init__(self, msg: str, option: Union["Argument", str]) -> None: + self.msg = msg + self.option_id = str(option) + + def __str__(self) -> str: + if self.option_id: + return f"option {self.option_id}: {self.msg}" + else: + return self.msg + + +class Argument: + """Class that mimics the necessary behaviour of optparse.Option. + + It's currently a least effort implementation and ignoring choices + and integer prefixes. + + https://docs.python.org/3/library/optparse.html#optparse-standard-option-types + """ + + _typ_map = {"int": int, "string": str, "float": float, "complex": complex} + + def __init__(self, *names: str, **attrs: Any) -> None: + """Store params in private vars for use in add_argument.""" + self._attrs = attrs + self._short_opts: List[str] = [] + self._long_opts: List[str] = [] + if "%default" in (attrs.get("help") or ""): + warnings.warn(ARGUMENT_PERCENT_DEFAULT, stacklevel=3) + try: + typ = attrs["type"] + except KeyError: + pass + else: + # This might raise a keyerror as well, don't want to catch that. + if isinstance(typ, str): + if typ == "choice": + warnings.warn( + ARGUMENT_TYPE_STR_CHOICE.format(typ=typ, names=names), + stacklevel=4, + ) + # argparse expects a type here take it from + # the type of the first element + attrs["type"] = type(attrs["choices"][0]) + else: + warnings.warn( + ARGUMENT_TYPE_STR.format(typ=typ, names=names), stacklevel=4 + ) + attrs["type"] = Argument._typ_map[typ] + # Used in test_parseopt -> test_parse_defaultgetter. + self.type = attrs["type"] + else: + self.type = typ + try: + # Attribute existence is tested in Config._processopt. + self.default = attrs["default"] + except KeyError: + pass + self._set_opt_strings(names) + dest: Optional[str] = attrs.get("dest") + if dest: + self.dest = dest + elif self._long_opts: + self.dest = self._long_opts[0][2:].replace("-", "_") + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError as e: + self.dest = "???" # Needed for the error repr. + raise ArgumentError("need a long or short option", self) from e + + def names(self) -> List[str]: + return self._short_opts + self._long_opts + + def attrs(self) -> Mapping[str, Any]: + # Update any attributes set by processopt. + attrs = "default dest help".split() + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get("help"): + a = self._attrs["help"] + a = a.replace("%default", "%(default)s") + # a = a.replace('%prog', '%(prog)s') + self._attrs["help"] = a + return self._attrs + + def _set_opt_strings(self, opts: Sequence[str]) -> None: + """Directly from optparse. + + Might not be necessary as this is passed to argparse later on. 
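+
+        For example, ``-x`` and ``--exitfirst`` are accepted, while ``-xy``
+        (neither a ``-x`` style short option nor a ``--`` long option) raises
+        ArgumentError.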
+ """ + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, + self, + ) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self, + ) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self, + ) + self._long_opts.append(opt) + + def __repr__(self) -> str: + args: List[str] = [] + if self._short_opts: + args += ["_short_opts: " + repr(self._short_opts)] + if self._long_opts: + args += ["_long_opts: " + repr(self._long_opts)] + args += ["dest: " + repr(self.dest)] + if hasattr(self, "type"): + args += ["type: " + repr(self.type)] + if hasattr(self, "default"): + args += ["default: " + repr(self.default)] + return "Argument({})".format(", ".join(args)) + + +class OptionGroup: + """A group of options shown in its own section.""" + + def __init__( + self, + name: str, + description: str = "", + parser: Optional[Parser] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.name = name + self.description = description + self.options: List[Argument] = [] + self.parser = parser + + def addoption(self, *opts: str, **attrs: Any) -> None: + """Add an option to this group. + + If a shortened version of a long option is specified, it will + be suppressed in the help. ``addoption('--twowords', '--two-words')`` + results in help showing ``--two-words`` only, but ``--twowords`` gets + accepted **and** the automatic destination is in ``args.twowords``. + + :param opts: + Option names, can be short or long options. + :param attrs: + Same attributes as the argparse library's :py:func:`add_argument() + ` function accepts. + """ + conflict = set(opts).intersection( + name for opt in self.options for name in opt.names() + ) + if conflict: + raise ValueError("option names %s already added" % conflict) + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=False) + + def _addoption(self, *opts: str, **attrs: Any) -> None: + option = Argument(*opts, **attrs) + self._addoption_instance(option, shortupper=True) + + def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None: + if not shortupper: + for opt in option._short_opts: + if opt[0] == "-" and opt[1].islower(): + raise ValueError("lowercase shortoptions reserved") + if self.parser: + self.parser.processoption(option) + self.options.append(option) + + +class MyOptionParser(argparse.ArgumentParser): + def __init__( + self, + parser: Parser, + extra_info: Optional[Dict[str, Any]] = None, + prog: Optional[str] = None, + ) -> None: + self._parser = parser + super().__init__( + prog=prog, + usage=parser._usage, + add_help=False, + formatter_class=DropShorterLongHelpFormatter, + allow_abbrev=False, + ) + # extra_info is a dict of (param -> value) to display if there's + # an usage error to provide more contextual information to the user. + self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> NoReturn: + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + # Type ignored because the attribute is set dynamically. 
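+            # e.g. `PYTEST_ADDOPTS="--tb"` (missing its value) is reported as
+            #   "error: argument --tb: expected one argument (via PYTEST_ADDOPTS)"
+            # (illustrative message).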
+ msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. + def parse_args( # type: ignore + self, + args: Optional[Sequence[str]] = None, + namespace: Optional[argparse.Namespace] = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + if sys.version_info[:2] < (3, 9): # pragma: no cover + # Backport of https://github.com/python/cpython/pull/14316 so we can + # disable long --argument abbreviations without breaking short flags. + def _parse_optional( + self, arg_string: str + ) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]: + if not arg_string: + return None + if not arg_string[0] in self.prefix_chars: + return None + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + if len(arg_string) == 1: + return None + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + if self.allow_abbrev or not arg_string.startswith("--"): + option_tuples = self._get_option_tuples(arg_string) + if len(option_tuples) > 1: + msg = gettext( + "ambiguous option: %(option)s could match %(matches)s" + ) + options = ", ".join(option for _, option, _ in option_tuples) + self.error(msg % {"option": arg_string, "matches": options}) + elif len(option_tuples) == 1: + (option_tuple,) = option_tuples + return option_tuple + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + if " " in arg_string: + return None + return None, arg_string, None + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. 
+ if "width" not in kwargs: + kwargs["width"] = _pytest._io.get_terminal_width() + super().__init__(*args, **kwargs) + + def _format_action_invocation(self, action: argparse.Action) -> str: + orgstr = super()._format_action_invocation(action) + if orgstr and orgstr[0] != "-": # only optional arguments + return orgstr + res: Optional[str] = getattr(action, "_formatted_action_invocation", None) + if res: + return res + options = orgstr.split(", ") + if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): + # a shortcut for '-h, --help' or '--abc', '-a' + action._formatted_action_invocation = orgstr # type: ignore + return orgstr + return_list = [] + short_long: Dict[str, str] = {} + for option in options: + if len(option) == 2 or option[2] == " ": + continue + if not option.startswith("--"): + raise ArgumentError( + 'long optional argument without "--": [%s]' % (option), option + ) + xxoption = option[2:] + shortened = xxoption.replace("-", "") + if shortened not in short_long or len(short_long[shortened]) < len( + xxoption + ): + short_long[shortened] = xxoption + # now short_long has been filled out to the longest with dashes + # **and** we keep the right option ordering from add_argument + for option in options: + if len(option) == 2 or option[2] == " ": + return_list.append(option) + if option[2:] == short_long.get(option.replace("-", "")): + return_list.append(option.replace(" ", "=", 1)) + formatted_action_invocation = ", ".join(return_list) + action._formatted_action_invocation = formatted_action_invocation # type: ignore + return formatted_action_invocation + + def _split_lines(self, text, width): + """Wrap lines after splitting on original newlines. + + This allows to have explicit line breaks in the help text. + """ + import textwrap + + lines = [] + for line in text.splitlines(): + lines.extend(textwrap.wrap(line.strip(), width)) + return lines diff --git a/venv/lib/python3.12/site-packages/_pytest/config/compat.py b/venv/lib/python3.12/site-packages/_pytest/config/compat.py new file mode 100644 index 0000000000..5bd922a4a8 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/config/compat.py @@ -0,0 +1,70 @@ +import functools +import warnings +from pathlib import Path +from typing import Optional + +from ..compat import LEGACY_PATH +from ..compat import legacy_path +from ..deprecated import HOOK_LEGACY_PATH_ARG +from _pytest.nodes import _check_path + +# hookname: (Path, LEGACY_PATH) +imply_paths_hooks = { + "pytest_ignore_collect": ("collection_path", "path"), + "pytest_collect_file": ("file_path", "path"), + "pytest_pycollect_makemodule": ("module_path", "path"), + "pytest_report_header": ("start_path", "startdir"), + "pytest_report_collectionfinish": ("start_path", "startdir"), +} + + +class PathAwareHookProxy: + """ + this helper wraps around hook callers + until pluggy supports fixingcalls, this one will do + + it currently doesn't return full hook caller proxies for fixed hooks, + this may have to be changed later depending on bugs + """ + + def __init__(self, hook_caller): + self.__hook_caller = hook_caller + + def __dir__(self): + return dir(self.__hook_caller) + + def __getattr__(self, key, _wraps=functools.wraps): + hook = getattr(self.__hook_caller, key) + if key not in imply_paths_hooks: + self.__dict__[key] = hook + return hook + else: + path_var, fspath_var = imply_paths_hooks[key] + + @_wraps(hook) + def fixed_hook(**kw): + path_value: Optional[Path] = kw.pop(path_var, None) + fspath_value: Optional[LEGACY_PATH] = kw.pop(fspath_var, None) 
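+            # Illustrative sketch (not from the upstream sources): the block below
+            # mirrors the two path arguments -- whichever one the caller supplied
+            # is kept, the other is derived from it, and passing the legacy one
+            # warns. A minimal standalone version of the same rule, assuming
+            # `py.path.local` (which is what LEGACY_PATH aliases):
+            #
+            #     from pathlib import Path
+            #     import py
+            #
+            #     def mirror(file_path=None, path=None):
+            #         if file_path is not None:
+            #             # Modern argument wins; back-fill the legacy one.
+            #             path = path or py.path.local(str(file_path))
+            #         else:
+            #             file_path = Path(path)
+            #         return file_path, path
+            #
+            #     mirror(file_path=Path("tests/test_x.py"))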
+ if fspath_value is not None: + warnings.warn( + HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg=fspath_var, pathlib_path_arg=path_var + ), + stacklevel=2, + ) + if path_value is not None: + if fspath_value is not None: + _check_path(path_value, fspath_value) + else: + fspath_value = legacy_path(path_value) + else: + assert fspath_value is not None + path_value = Path(fspath_value) + + kw[path_var] = path_value + kw[fspath_var] = fspath_value + return hook(**kw) + + fixed_hook.__name__ = key + self.__dict__[key] = fixed_hook + return fixed_hook diff --git a/venv/lib/python3.12/site-packages/_pytest/config/exceptions.py b/venv/lib/python3.12/site-packages/_pytest/config/exceptions.py new file mode 100644 index 0000000000..4f1320e758 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/config/exceptions.py @@ -0,0 +1,11 @@ +from _pytest.compat import final + + +@final +class UsageError(Exception): + """Error in pytest usage or invocation.""" + + +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" diff --git a/venv/lib/python3.12/site-packages/_pytest/config/findpaths.py b/venv/lib/python3.12/site-packages/_pytest/config/findpaths.py new file mode 100644 index 0000000000..02674ffae3 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/config/findpaths.py @@ -0,0 +1,218 @@ +import os +import sys +from pathlib import Path +from typing import Dict +from typing import Iterable +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import iniconfig + +from .exceptions import UsageError +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.pathlib import safe_exists + +if TYPE_CHECKING: + from . import Config + + +def _parse_ini_config(path: Path) -> iniconfig.IniConfig: + """Parse the given generic '.ini' file using legacy IniConfig parser, returning + the parsed object. + + Raise UsageError if the file cannot be parsed. + """ + try: + return iniconfig.IniConfig(str(path)) + except iniconfig.ParseError as exc: + raise UsageError(str(exc)) from exc + + +def load_config_dict_from_file( + filepath: Path, +) -> Optional[Dict[str, Union[str, List[str]]]]: + """Load pytest configuration from the given file path, if supported. + + Return None if the file does not contain valid pytest configuration. + """ + + # Configuration from ini files are obtained from the [pytest] section, if present. + if filepath.suffix == ".ini": + iniconfig = _parse_ini_config(filepath) + + if "pytest" in iniconfig: + return dict(iniconfig["pytest"].items()) + else: + # "pytest.ini" files are always the source of configuration, even if empty. + if filepath.name == "pytest.ini": + return {} + + # '.cfg' files are considered if they contain a "[tool:pytest]" section. + elif filepath.suffix == ".cfg": + iniconfig = _parse_ini_config(filepath) + + if "tool:pytest" in iniconfig.sections: + return dict(iniconfig["tool:pytest"].items()) + elif "pytest" in iniconfig.sections: + # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that + # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). + fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) + + # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. 
+ elif filepath.suffix == ".toml": + if sys.version_info >= (3, 11): + import tomllib + else: + import tomli as tomllib + + toml_text = filepath.read_text(encoding="utf-8") + try: + config = tomllib.loads(toml_text) + except tomllib.TOMLDecodeError as exc: + raise UsageError(f"{filepath}: {exc}") from exc + + result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) + if result is not None: + # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), + # however we need to convert all scalar values to str for compatibility with the rest + # of the configuration system, which expects strings only. + def make_scalar(v: object) -> Union[str, List[str]]: + return v if isinstance(v, list) else str(v) + + return {k: make_scalar(v) for k, v in result.items()} + + return None + + +def locate_config( + args: Iterable[Path], +) -> Tuple[Optional[Path], Optional[Path], Dict[str, Union[str, List[str]]]]: + """Search in the list of arguments for a valid ini-file for pytest, + and return a tuple of (rootdir, inifile, cfg-dict).""" + config_names = [ + "pytest.ini", + ".pytest.ini", + "pyproject.toml", + "tox.ini", + "setup.cfg", + ] + args = [x for x in args if not str(x).startswith("-")] + if not args: + args = [Path.cwd()] + for arg in args: + argpath = absolutepath(arg) + for base in (argpath, *argpath.parents): + for config_name in config_names: + p = base / config_name + if p.is_file(): + ini_config = load_config_dict_from_file(p) + if ini_config is not None: + return base, p, ini_config + return None, None, {} + + +def get_common_ancestor(paths: Iterable[Path]) -> Path: + common_ancestor: Optional[Path] = None + for path in paths: + if not path.exists(): + continue + if common_ancestor is None: + common_ancestor = path + else: + if common_ancestor in path.parents or path == common_ancestor: + continue + elif path in common_ancestor.parents: + common_ancestor = path + else: + shared = commonpath(path, common_ancestor) + if shared is not None: + common_ancestor = shared + if common_ancestor is None: + common_ancestor = Path.cwd() + elif common_ancestor.is_file(): + common_ancestor = common_ancestor.parent + return common_ancestor + + +def get_dirs_from_args(args: Iterable[str]) -> List[Path]: + def is_option(x: str) -> bool: + return x.startswith("-") + + def get_file_part_from_node_id(x: str) -> str: + return x.split("::")[0] + + def get_dir_from_path(path: Path) -> Path: + if path.is_dir(): + return path + return path.parent + + # These look like paths but may not exist + possible_paths = ( + absolutepath(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)] + + +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." 
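+
+# Illustrative sketch (not from the upstream sources): given a pyproject.toml
+# containing, for example,
+#
+#     [tool.pytest.ini_options]
+#     addopts = "-ra"
+#     testpaths = ["tests"]
+#
+# load_config_dict_from_file() above returns
+# {"addopts": "-ra", "testpaths": ["tests"]}: lists pass through unchanged and
+# every other scalar is coerced to str. The conversion step boils down to:
+#
+#     import tomllib  # Python 3.11+; "tomli" on older interpreters
+#     table = tomllib.loads(toml_text)["tool"]["pytest"]["ini_options"]
+#     ini = {k: v if isinstance(v, list) else str(v) for k, v in table.items()}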
+ + +def determine_setup( + inifile: Optional[str], + args: Sequence[str], + rootdir_cmd_arg: Optional[str] = None, + config: Optional["Config"] = None, +) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]: + rootdir = None + dirs = get_dirs_from_args(args) + if inifile: + inipath_ = absolutepath(inifile) + inipath: Optional[Path] = inipath_ + inicfg = load_config_dict_from_file(inipath_) or {} + if rootdir_cmd_arg is None: + rootdir = inipath_.parent + else: + ancestor = get_common_ancestor(dirs) + rootdir, inipath, inicfg = locate_config([ancestor]) + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in (ancestor, *ancestor.parents): + if (possible_rootdir / "setup.py").is_file(): + rootdir = possible_rootdir + break + else: + if dirs != [ancestor]: + rootdir, inipath, inicfg = locate_config(dirs) + if rootdir is None: + if config is not None: + cwd = config.invocation_params.dir + else: + cwd = Path.cwd() + rootdir = get_common_ancestor([cwd, ancestor]) + if is_fs_root(rootdir): + rootdir = ancestor + if rootdir_cmd_arg: + rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.is_dir(): + raise UsageError( + "Directory '{}' not found. Check your '--rootdir' option.".format( + rootdir + ) + ) + assert rootdir is not None + return rootdir, inipath, inicfg or {} + + +def is_fs_root(p: Path) -> bool: + r""" + Return True if the given path is pointing to the root of the + file system ("/" on Unix and "C:\\" on Windows for example). + """ + return os.path.splitdrive(str(p))[1] == os.sep diff --git a/venv/lib/python3.12/site-packages/_pytest/debugging.py b/venv/lib/python3.12/site-packages/_pytest/debugging.py new file mode 100644 index 0000000000..a3f80802ca --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/debugging.py @@ -0,0 +1,391 @@ +"""Interactive debugging with PDB, the Python Debugger.""" +import argparse +import functools +import sys +import types +import unittest +from typing import Any +from typing import Callable +from typing import Generator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from _pytest import outcomes +from _pytest._code import ExceptionInfo +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.config.exceptions import UsageError +from _pytest.nodes import Node +from _pytest.reports import BaseReport + +if TYPE_CHECKING: + from _pytest.capture import CaptureManager + from _pytest.runner import CallInfo + + +def _validate_usepdb_cls(value: str) -> Tuple[str, str]: + """Validate syntax of --pdbcls option.""" + try: + modname, classname = value.split(":") + except ValueError as e: + raise argparse.ArgumentTypeError( + f"{value!r} is not in the format 'modname:classname'" + ) from e + return (modname, classname) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--pdb", + dest="usepdb", + action="store_true", + help="Start the interactive Python debugger on errors or KeyboardInterrupt", + ) + group._addoption( + "--pdbcls", + dest="usepdb_cls", + metavar="modulename:classname", + type=_validate_usepdb_cls, + help="Specify a custom interactive Python debugger for use with --pdb." 
+ "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", + ) + group._addoption( + "--trace", + dest="trace", + action="store_true", + help="Immediately break when running each test", + ) + + +def pytest_configure(config: Config) -> None: + import pdb + + if config.getvalue("trace"): + config.pluginmanager.register(PdbTrace(), "pdbtrace") + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), "pdbinvoke") + + pytestPDB._saved.append( + (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config) + ) + pdb.set_trace = pytestPDB.set_trace + pytestPDB._pluginmanager = config.pluginmanager + pytestPDB._config = config + + # NOTE: not using pytest_unconfigure, since it might get called although + # pytest_configure was not (if another plugin raises UsageError). + def fin() -> None: + ( + pdb.set_trace, + pytestPDB._pluginmanager, + pytestPDB._config, + ) = pytestPDB._saved.pop() + + config.add_cleanup(fin) + + +class pytestPDB: + """Pseudo PDB that defers to the real pdb.""" + + _pluginmanager: Optional[PytestPluginManager] = None + _config: Optional[Config] = None + _saved: List[ + Tuple[Callable[..., None], Optional[PytestPluginManager], Optional[Config]] + ] = [] + _recursive_debug = 0 + _wrapped_pdb_cls: Optional[Tuple[Type[Any], Type[Any]]] = None + + @classmethod + def _is_capturing(cls, capman: Optional["CaptureManager"]) -> Union[str, bool]: + if capman: + return capman.is_capturing() + return False + + @classmethod + def _import_pdb_cls(cls, capman: Optional["CaptureManager"]): + if not cls._config: + import pdb + + # Happens when using pytest.set_trace outside of a test. + return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: Optional["CaptureManager"]): + import _pytest.config + + # Type ignored because mypy doesn't support "dynamic" + # inheritance like this. 
+ class PytestPdbWrapper(pdb_cls): # type: ignore[valid-type,misc] + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + "PDB continue (IO-capturing resumed for %s)" + % capturing, + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def do_quit(self, arg): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: Optional[CaptureManager] = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + "PDB %s (IO-capturing turned off for %s)" + % (method, capturing), + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: "CallInfo[Any]", report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + + if not isinstance(call.excinfo.value, unittest.SkipTest): + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace: + @hookimpl(hookwrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, None, None]: + wrap_pytest_function_for_tracing(pyfuncitem) + yield + + +def wrap_pytest_function_for_tracing(pyfuncitem): + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs): + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem): + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we re-use the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
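+    # Illustrative note (not from the upstream sources): the sections replayed
+    # below are selected by pytest's --show-capture option (no/stdout/stderr/
+    # log/all), so a post-mortem session could hypothetically be started with:
+    #
+    #     pytest --pdb --show-capture=stdout tests/test_demo.py
+    #
+    # where tests/test_demo.py is a placeholder path.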
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType: + from doctest import UnexpectedException + + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.excinfo[2] + else: + assert excinfo._excinfo is not None + return excinfo._excinfo[2] + + +def post_mortem(t: types.TracebackType) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/venv/lib/python3.12/site-packages/_pytest/deprecated.py b/venv/lib/python3.12/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000000..b9c10df7a0 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/deprecated.py @@ -0,0 +1,146 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn8Warning +from _pytest.warning_types import UnformattedWarning + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + +NOSE_SUPPORT = UnformattedWarning( + PytestRemovedIn8Warning, + "Support for nose tests is deprecated and will be removed in a future release.\n" + "{nodeid} is using nose method: `{method}` ({stage})\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#support-for-tests-written-for-nose", +) + +NOSE_SUPPORT_METHOD = UnformattedWarning( + PytestRemovedIn8Warning, + "Support for nose tests is deprecated and will be removed in a future release.\n" + "{nodeid} is using nose-specific method: `{method}(self)`\n" + "To remove this warning, rename it to `{method}_method(self)`\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#support-for-tests-written-for-nose", +) + + +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." 
+) + +WARNING_CMDLINE_PREPARSE_HOOK = PytestRemovedIn8Warning( + "The pytest_cmdline_preparse hook is deprecated and will be removed in a future release. \n" + "Please use pytest_load_initial_conftests hook instead." +) + +FSCOLLECTOR_GETHOOKPROXY_ISINITPATH = PytestRemovedIn8Warning( + "The gethookproxy() and isinitpath() methods of FSCollector and Package are deprecated; " + "use self.session.gethookproxy() and self.session.isinitpath() instead. " +) + +STRICT_OPTION = PytestRemovedIn8Warning( + "The --strict option is deprecated, use --strict-markers instead." +) + +# This deprecation is never really meant to be removed. +PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + +ARGUMENT_PERCENT_DEFAULT = PytestRemovedIn8Warning( + 'pytest now uses argparse. "%default" should be changed to "%(default)s"', +) + +ARGUMENT_TYPE_STR_CHOICE = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}." + " For choices this is optional and can be omitted, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + +ARGUMENT_TYPE_STR = UnformattedWarning( + PytestRemovedIn8Warning, + "`type` argument to addoption() is the string {typ!r}, " + " but when supplied should be a type (for example `str` or `int`)." + " (options: {names})", +) + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. " + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +WARNS_NONE_ARG = PytestRemovedIn8Warning( + "Passing None has been deprecated.\n" + "See https://docs.pytest.org/en/latest/how-to/capture-warnings.html" + "#additional-use-cases-of-warnings-in-tests" + " for alternatives in common use cases." +) + +KEYWORD_MSG_ARG = UnformattedWarning( + PytestRemovedIn8Warning, + "pytest.{func}(msg=...) is now deprecated, use pytest.{func}(reason=...) instead", +) + +INSTANCE_COLLECTOR = PytestRemovedIn8Warning( + "The pytest.Instance collector type is deprecated and is no longer used. " + "See https://docs.pytest.org/en/latest/deprecations.html#the-pytest-instance-collector", +) +HOOK_LEGACY_MARKING = UnformattedWarning( + PytestDeprecationWarning, + "The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n" + "Please use the pytest.hook{type}({hook_opts}) decorator instead\n" + " to configure the hooks.\n" + " See https://docs.pytest.org/en/latest/deprecations.html" + "#configuring-hook-specs-impls-using-markers", +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). 
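+#
+# Illustrative sketch (not from the upstream sources): a minimal, self-contained
+# instance of the pattern described above, using a hypothetical class name:
+#
+#     from _pytest.deprecated import check_ispytest
+#
+#     class PrivateThing:
+#         def __init__(self, name: str, *, _ispytest: bool = False) -> None:
+#             check_ispytest(_ispytest)  # warns with PRIVATE unless _ispytest=True
+#             self.name = name
+#
+#     PrivateThing("x", _ispytest=True)  # internal call: silent
+#     PrivateThing("x")                  # external call: PytestDeprecationWarning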
+ + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/venv/lib/python3.12/site-packages/_pytest/doctest.py b/venv/lib/python3.12/site-packages/_pytest/doctest.py new file mode 100644 index 0000000000..ca41a98ea9 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/doctest.py @@ -0,0 +1,771 @@ +"""Discover and run doctests in modules and test files.""" +import bdb +import functools +import inspect +import os +import platform +import sys +import traceback +import types +import warnings +from contextlib import contextmanager +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.python import Module +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + import doctest + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: Optional[Type["doctest.OutputChecker"]] = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + "Option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "Encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="Run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="Choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="Doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="Ignore doctest ImportErrors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="For a given doctest, continue to run after the first failure", + 
dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + file_path: Path, + parent: Collector, +) -> Optional[Union["DoctestModule", "DoctestTextfile"]]: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + mod: DoctestModule = DoctestModule.from_parent(parent, path=file_path) + return mod + elif _is_doctest(config, file_path, parent): + txt: DoctestTextfile = DoctestTextfile.from_parent(parent, path=file_path) + return txt + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence["doctest.DocTestFailure"]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> Type["doctest.DocTestRunner"]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. 
+ """ + + def __init__( + self, + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: "doctest.DocTest", + example: "doctest.Example", + exc_info: Tuple[Type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: Optional["doctest.OutputChecker"] = None, + verbose: Optional[bool] = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> "doctest.DocTestRunner": + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(Item): + def __init__( + self, + name: str, + parent: "Union[DoctestTextfile, DoctestModule]", + runner: Optional["doctest.DocTestRunner"] = None, + dtest: Optional["doctest.DocTest"] = None, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + self.obj = None + self.fixture_request: Optional[FixtureRequest] = None + + @classmethod + def from_parent( # type: ignore + cls, + parent: "Union[DoctestTextfile, DoctestModule]", + *, + name: str, + runner: "doctest.DocTestRunner", + dtest: "doctest.DocTest", + ): + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def setup(self) -> None: + if self.dtest is not None: + self.fixture_request = _setup_fixtures(self) + globs = dict(getfixture=self.fixture_request.getfixturevalue) + for name, value in self.fixture_request.getfixturevalue( + "doctest_namespace" + ).items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + assert self.dtest is not None + assert self.runner is not None + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: List["doctest.DocTestFailure"] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. 
Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> Union[str, TerminalRepr]: + import doctest + + failures: Optional[ + Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]] + ] = None + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." 
+ if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + assert self.dtest is not None + return self.path, self.dtest.lineno, "[doctest] %s" % self.name + + +def _get_flag_lookup() -> Dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(parent): + optionflags_str = parent.config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
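+        # Illustrative sketch (not from the upstream sources): stripped to its
+        # core, the manual parse below is the moral equivalent of
+        # doctest.testfile():
+        #
+        #     import doctest
+        #     text = ">>> 1 + 1\n2\n"
+        #     test = doctest.DocTestParser().get_doctest(
+        #         text, {"__name__": "__main__"}, "demo", "demo.txt", 0
+        #     )
+        #     doctest.DocTestRunner().run(test)
+        #
+        # What the real code adds is the custom checker and runner from
+        # _get_checker()/_get_runner(), which doctest.testfile() cannot be given.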
+ encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: "doctest.DocTest") -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None, None, None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + "Got %r when unwrapping %r. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080" % (e, func), + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug. + + https://github.com/pytest-dev/pytest/issues/3456 + https://bugs.python.org/issue25532 + """ + + def _find_lineno(self, obj, source_lines): + """Doctest code does not take into account `@property`, this + is a hackish way to fix it. https://bugs.python.org/issue17446 + + Wrapped Doctests will need to be unwrapped so the correct + line number is returned. This will be reported upstream. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + if _is_mocked(obj): + return + with _patch_unwrap_mock_aware(): + # Type ignored because this is a private function. + super()._find( # type:ignore[misc] + tests, obj, name, module, source_lines, globs, seen + ) + + if sys.version_info < (3, 13): + + def _from_module(self, module, object): + """`cached_property` objects are never considered a part + of the 'current module'. As such they are skipped by doctest. 
+            Here we override `_from_module` to check the underlying
+            function instead. https://github.com/python/cpython/issues/107995
+            """
+            if hasattr(functools, "cached_property") and isinstance(
+                object, functools.cached_property
+            ):
+                object = object.func
+
+            # Type ignored because this is a private function.
+            return super()._from_module(module, object)  # type: ignore[misc]
+
+        else:  # pragma: no cover
+            pass
+
+        if self.path.name == "conftest.py":
+            module = self.config.pluginmanager._importconftest(
+                self.path,
+                self.config.getoption("importmode"),
+                rootpath=self.config.rootpath,
+            )
+        else:
+            try:
+                module = import_path(
+                    self.path,
+                    root=self.config.rootpath,
+                    mode=self.config.getoption("importmode"),
+                )
+            except ImportError:
+                if self.config.getvalue("doctest_ignore_import_errors"):
+                    skip("unable to import module %r" % self.path)
+                else:
+                    raise
+        # Uses internal doctest module parsing mechanism.
+        finder = MockAwareDocTestFinder()
+        optionflags = get_optionflags(self)
+        runner = _get_runner(
+            verbose=False,
+            optionflags=optionflags,
+            checker=_get_checker(),
+            continue_on_failure=_get_continue_on_failure(self.config),
+        )
+
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem.from_parent(
+                    self, name=test.name, runner=runner, dtest=test
+                )
+
+
+def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
+    """Used by DoctestTextfile and DoctestItem to setup fixture information."""
+
+    def func() -> None:
+        pass
+
+    doctest_item.funcargs = {}  # type: ignore[attr-defined]
+    fm = doctest_item.session._fixturemanager
+    doctest_item._fixtureinfo = fm.getfixtureinfo(  # type: ignore[attr-defined]
+        node=doctest_item, func=func, cls=None, funcargs=False
+    )
+    fixture_request = FixtureRequest(doctest_item, _ispytest=True)
+    fixture_request._fillfixtures()
+    return fixture_request
+
+
+def _init_checker_class() -> Type["doctest.OutputChecker"]:
+    import doctest
+    import re
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        # Based on doctest_nose_plugin.py from the nltk project
+        # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )
+
+        def check_output(self, want: str, got: str, optionflags: int) -> bool:
+            if super().check_output(want, got, optionflags):
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            allow_number = optionflags & _get_number_flag()
+
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False
+
+            def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+                return re.sub(regex, r"\1\2", txt)
+
+            if allow_unicode:
+                want = remove_prefixes(self._unicode_literal_re, want)
+                got = remove_prefixes(self._unicode_literal_re, got)
+
+            if allow_bytes:
+                want = remove_prefixes(self._bytes_literal_re, want)
+                got = remove_prefixes(self._bytes_literal_re, got)
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return super().check_output(want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want: str, got: str) -> str:
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction: Optional[str] = w.group("fraction")
+                exponent: Optional[str] = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                precision = 0 if fraction is None else len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got
+
+    return LiteralsOutputChecker
+
+
+def _get_checker() -> "doctest.OutputChecker":
+    """Return a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    global CHECKER_CLASS
+    if CHECKER_CLASS is None:
+        CHECKER_CLASS = _init_checker_class()
+    return CHECKER_CLASS()
+
+
+def _get_allow_unicode_flag() -> int:
+    """Register and return the ALLOW_UNICODE flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+    """Register and return the ALLOW_BYTES flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+    """Register and return the NUMBER flag."""
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+    """Return the actual `doctest` module flag value.
+
+    We want to do it as late as possible to avoid importing `doctest` and all
+    its dependencies when parsing options, as it adds overhead and breaks tests.
+ """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@fixture(scope="session") +def doctest_namespace() -> Dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests. + + Usually this fixture is used in conjunction with another ``autouse`` fixture: + + .. code-block:: python + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace["np"] = numpy + + For more details: :ref:`doctest_namespace`. + """ + return dict() diff --git a/venv/lib/python3.12/site-packages/_pytest/faulthandler.py b/venv/lib/python3.12/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000000..d8c7e9fd3b --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/faulthandler.py @@ -0,0 +1,102 @@ +import os +import sys +from typing import Generator + +import pytest +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey + + +fault_handler_original_stderr_fd_key = StashKey[int]() +fault_handler_stderr_fd_key = StashKey[int]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish" + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + # at teardown we want to restore the original faulthandler fileno + # but faulthandler has no api to return the original fileno + # so here we stash the stderr fileno to be used at teardown + # sys.stderr and sys.__stderr__ may be closed or patched during the session + # so we can't rely on their values being good at that point (#11572). + stderr_fileno = get_stderr_fileno() + if faulthandler.is_enabled(): + config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno + config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno) + faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_fd_key in config.stash: + os.close(config.stash[fault_handler_stderr_fd_key]) + del config.stash[fault_handler_stderr_fd_key] + # Re-enable the faulthandler if it was originally enabled. + if fault_handler_original_stderr_fd_key in config.stash: + faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key]) + del config.stash[fault_handler_original_stderr_fd_key] + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, ValueError): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
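+        # Illustrative note (not from the upstream sources): both of these, for
+        # instance, end up on this fallback path:
+        #
+        #     import io, sys
+        #     sys.stderr = io.StringIO()  # .fileno() raises io.UnsupportedOperation,
+        #                                 # a ValueError subclass
+        #     sys.stderr = object()       # no usable .fileno() -> AttributeError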
+        return sys.__stderr__.fileno()
+
+
+def get_timeout_config_value(config: Config) -> float:
+    return float(config.getini("faulthandler_timeout") or 0.0)
+
+
+@pytest.hookimpl(hookwrapper=True, trylast=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
+    timeout = get_timeout_config_value(item.config)
+    if timeout > 0:
+        import faulthandler
+
+        stderr = item.config.stash[fault_handler_stderr_fd_key]
+        faulthandler.dump_traceback_later(timeout, file=stderr)
+        try:
+            yield
+        finally:
+            faulthandler.cancel_dump_traceback_later()
+    else:
+        yield
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_enter_pdb() -> None:
+    """Cancel any traceback dumping due to timeout before entering pdb."""
+    import faulthandler
+
+    faulthandler.cancel_dump_traceback_later()
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_exception_interact() -> None:
+    """Cancel any traceback dumping due to an interactive exception being
+    raised."""
+    import faulthandler
+
+    faulthandler.cancel_dump_traceback_later()
diff --git a/venv/lib/python3.12/site-packages/_pytest/fixtures.py b/venv/lib/python3.12/site-packages/_pytest/fixtures.py
new file mode 100644
index 0000000000..0462504efa
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/fixtures.py
@@ -0,0 +1,1713 @@
+import dataclasses
+import functools
+import inspect
+import os
+import sys
+import warnings
+from collections import defaultdict
+from collections import deque
+from contextlib import suppress
+from pathlib import Path
+from types import TracebackType
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import Dict
+from typing import Generator
+from typing import Generic
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import MutableMapping
+from typing import NoReturn
+from typing import Optional
+from typing import Sequence
+from typing import Set
+from typing import Tuple
+from typing import Type
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+import _pytest
+from _pytest import nodes
+from _pytest._code import getfslineno
+from _pytest._code.code import FormattedExcinfo
+from _pytest._code.code import TerminalRepr
+from _pytest._io import TerminalWriter
+from _pytest.compat import _format_args
+from _pytest.compat import _PytestWrapper
+from _pytest.compat import assert_never
+from _pytest.compat import final
+from _pytest.compat import get_real_func
+from _pytest.compat import get_real_method
+from _pytest.compat import getfuncargnames
+from _pytest.compat import getimfunc
+from _pytest.compat import getlocation
+from _pytest.compat import is_generator
+from _pytest.compat import NOTSET
+from _pytest.compat import NotSetType
+from _pytest.compat import overload
+from _pytest.compat import safe_getattr
+from _pytest.config import _PluggyPlugin
+from _pytest.config import Config
+from _pytest.config.argparsing import Parser
+from _pytest.deprecated import check_ispytest
+from _pytest.deprecated import YIELD_FIXTURE
+from _pytest.mark import Mark
+from _pytest.mark import ParameterSet
+from _pytest.mark.structures import MarkDecorator
+from _pytest.outcomes import fail
+from _pytest.outcomes import skip
+from _pytest.outcomes import TEST_OUTCOME
+from _pytest.pathlib import absolutepath
+from _pytest.pathlib import bestrelpath
+from _pytest.scope import HIGH_SCOPES
+from _pytest.scope import Scope
+from _pytest.stash import StashKey
+
+
+if TYPE_CHECKING:
+    from typing import Deque
+
+    from _pytest.scope import _ScopeName
+    from _pytest.main import Session
+    from _pytest.python import CallSpec2
+    from _pytest.python import Metafunc
+
+
+# The value of the fixture -- return/yield of the fixture function (type variable).
+FixtureValue = TypeVar("FixtureValue")
+# The type of the fixture function (type variable).
+FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object])
+# The type of a fixture function (type alias generic in fixture value).
+_FixtureFunc = Union[
+    Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]]
+]
+# The type of FixtureDef.cached_result (type alias generic in fixture value).
+_FixtureCachedResult = Union[
+    Tuple[
+        # The result.
+        FixtureValue,
+        # Cache key.
+        object,
+        None,
+    ],
+    Tuple[
+        None,
+        # Cache key.
+        object,
+        # Exc info if raised.
+        Tuple[Type[BaseException], BaseException, TracebackType],
+    ],
+]
+
+
+@dataclasses.dataclass(frozen=True)
+class PseudoFixtureDef(Generic[FixtureValue]):
+    cached_result: "_FixtureCachedResult[FixtureValue]"
+    _scope: Scope
+
+
+def pytest_sessionstart(session: "Session") -> None:
+    session._fixturemanager = FixtureManager(session)
+
+
+def get_scope_package(
+    node: nodes.Item,
+    fixturedef: "FixtureDef[object]",
+) -> Optional[Union[nodes.Item, nodes.Collector]]:
+    from _pytest.python import Package
+
+    current: Optional[Union[nodes.Item, nodes.Collector]] = node
+    fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py")
+    while current and (
+        not isinstance(current, Package) or fixture_package_name != current.nodeid
+    ):
+        current = current.parent  # type: ignore[assignment]
+    if current is None:
+        return node.session
+    return current
+
+
+def get_scope_node(
+    node: nodes.Node, scope: Scope
+) -> Optional[Union[nodes.Item, nodes.Collector]]:
+    import _pytest.python
+
+    if scope is Scope.Function:
+        return node.getparent(nodes.Item)
+    elif scope is Scope.Class:
+        return node.getparent(_pytest.python.Class)
+    elif scope is Scope.Module:
+        return node.getparent(_pytest.python.Module)
+    elif scope is Scope.Package:
+        return node.getparent(_pytest.python.Package)
+    elif scope is Scope.Session:
+        return node.getparent(_pytest.main.Session)
+    else:
+        assert_never(scope)
+
+
+# Used for storing artificial fixturedefs for direct parametrization.
+name2pseudofixturedef_key = StashKey[Dict[str, "FixtureDef[Any]"]]()
+
+
+def add_funcarg_pseudo_fixture_def(
+    collector: nodes.Collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
+) -> None:
+    # This function will transform all collected calls to functions
+    # if they use direct funcargs (i.e. direct parametrization)
+    # because we want later test execution to be able to rely on
+    # an existing FixtureDef structure for all arguments.
+    # XXX we can probably avoid this algorithm if we modify CallSpec2
+    # to directly care for creating the fixturedefs within its methods.
+    if not metafunc._calls[0].funcargs:
+        # This function call does not have direct parametrization.
+        return
+    # Collect funcargs of all callspecs into a list of values.
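+    # Illustrative sketch of "direct parametrization" (example names are
+    # hypothetical, not from this file):
+    #
+    #     @pytest.mark.parametrize("x", [0, 1])
+    #     def test_double(x):
+    #         assert 2 * x == x + x
+    #
+    # Here "x" is a plain argument rather than a fixture; each callspec arrives
+    # with funcargs {"x": 0} / {"x": 1}, which the loop below folds into params.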
+ arg2params: Dict[str, List[object]] = {} + arg2scope: Dict[str, Scope] = {} + for callspec in metafunc._calls: + for argname, argvalue in callspec.funcargs.items(): + assert argname not in callspec.params + callspec.params[argname] = argvalue + arg2params_list = arg2params.setdefault(argname, []) + callspec.indices[argname] = len(arg2params_list) + arg2params_list.append(argvalue) + if argname not in arg2scope: + scope = callspec._arg2scope.get(argname, Scope.Function) + arg2scope[argname] = scope + callspec.funcargs.clear() + + # Register artificial FixtureDef's so that later at test execution + # time we can rely on a proper FixtureDef to exist for fixture setup. + arg2fixturedefs = metafunc._arg2fixturedefs + for argname, valuelist in arg2params.items(): + # If we have a scope that is higher than function, we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + scope = arg2scope[argname] + node = None + if scope is not Scope.Function: + node = get_scope_node(collector, scope) + if node is None: + assert scope is Scope.Class and isinstance( + collector, _pytest.python.Module + ) + # Use module-level collector for class-scope (for now). + node = collector + if node is None: + name2pseudofixturedef = None + else: + default: Dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + arg2fixturedefs[argname] = [name2pseudofixturedef[argname]] + else: + fixturedef = FixtureDef( + fixturemanager=fixturemanager, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=arg2scope[argname], + params=valuelist, + unittest=False, + ids=None, + ) + arg2fixturedefs[argname] = [fixturedef] + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + + +def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]: + """Return fixturemarker or None if it doesn't exist or raised + exceptions.""" + return cast( + Optional[FixtureFunctionMarker], + safe_getattr(obj, "_pytestfixturefunction", None), + ) + + +# Parametrized fixture key, helper alias for code below. +_Key = Tuple[object, ...] + + +def get_parametrized_fixture_keys(item: nodes.Item, scope: Scope) -> Iterator[_Key]: + """Return list of keys for all parametrized arguments which match + the specified scope.""" + assert scope is not Scope.Function + try: + callspec = item.callspec # type: ignore[attr-defined] + except AttributeError: + pass + else: + cs: CallSpec2 = callspec + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. + for argname, param_index in sorted(cs.indices.items()): + if cs._arg2scope[argname] != scope: + continue + if scope is Scope.Session: + key: _Key = (argname, param_index) + elif scope is Scope.Package: + key = (argname, param_index, item.path.parent) + elif scope is Scope.Module: + key = (argname, param_index, item.path) + elif scope is Scope.Class: + item_cls = item.cls # type: ignore[attr-defined] + key = (argname, param_index, item.path, item_cls) + else: + assert_never(scope) + yield key + + +# Algorithm for sorting on a per-parametrized resource setup basis. 
+# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +def reorder_items(items: Sequence[nodes.Item]) -> List[nodes.Item]: + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]] = {} + items_by_argkey: Dict[Scope, Dict[_Key, Deque[nodes.Item]]] = {} + for scope in HIGH_SCOPES: + d: Dict[nodes.Item, Dict[_Key, None]] = {} + argkeys_cache[scope] = d + item_d: Dict[_Key, Deque[nodes.Item]] = defaultdict(deque) + items_by_argkey[scope] = item_d + for item in items: + keys = dict.fromkeys(get_parametrized_fixture_keys(item, scope), None) + if keys: + d[item] = keys + for key in keys: + item_d[key].append(item) + items_dict = dict.fromkeys(items, None) + return list( + reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, Scope.Session) + ) + + +def fix_cache_order( + item: nodes.Item, + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], +) -> None: + for scope in HIGH_SCOPES: + for key in argkeys_cache[scope].get(item, []): + items_by_argkey[scope][key].appendleft(item) + + +def reorder_items_atscope( + items: Dict[nodes.Item, None], + argkeys_cache: Dict[Scope, Dict[nodes.Item, Dict[_Key, None]]], + items_by_argkey: Dict[Scope, Dict[_Key, "Deque[nodes.Item]"]], + scope: Scope, +) -> Dict[nodes.Item, None]: + if scope is Scope.Function or len(items) < 3: + return items + ignore: Set[Optional[_Key]] = set() + items_deque = deque(items) + items_done: Dict[nodes.Item, None] = {} + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_cache = argkeys_cache[scope] + while items_deque: + no_argkey_group: Dict[nodes.Item, None] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = dict.fromkeys( + (k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None + ) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scope.next_lower() + ) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done + + +def get_direct_param_fixture_func(request: "FixtureRequest") -> Any: + return request.param + + +@dataclasses.dataclass +class FuncFixtureInfo: + __slots__ = ("argnames", "initialnames", "names_closure", "name2fixturedefs") + + # Original function argument names. + argnames: Tuple[str, ...] + # Argnames that function immediately requires. These include argnames + + # fixture names specified via usefixtures and via autouse=True in fixture + # definitions. + initialnames: Tuple[str, ...] + names_closure: List[str] + name2fixturedefs: Dict[str, Sequence["FixtureDef[Any]"]] + + def prune_dependency_tree(self) -> None: + """Recompute names_closure from initialnames and name2fixturedefs. + + Can only reduce names_closure, which means that the new closure will + always be a subset of the old one. The order is preserved. 
+ + This method is needed because direct parametrization may shadow some + of the fixtures that were included in the originally built dependency + tree. In this way the dependency tree can get pruned, and the closure + of argnames may get reduced. + """ + closure: Set[str] = set() + working_set = set(self.initialnames) + while working_set: + argname = working_set.pop() + # Argname may be smth not included in the original names_closure, + # in which case we ignore it. This currently happens with pseudo + # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'. + # So they introduce the new dependency 'request' which might have + # been missing in the original tree (closure). + if argname not in closure and argname in self.names_closure: + closure.add(argname) + if argname in self.name2fixturedefs: + working_set.update(self.name2fixturedefs[argname][-1].argnames) + + self.names_closure[:] = sorted(closure, key=self.names_closure.index) + + +class FixtureRequest: + """A request for a fixture from a test or fixture function. + + A request object gives access to the requesting test context and has + an optional ``param`` attribute in case the fixture is parametrized + indirectly. + """ + + def __init__(self, pyfuncitem, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pyfuncitem = pyfuncitem + #: Fixture for which this request is being performed. + self.fixturename: Optional[str] = None + self._scope = Scope.Function + self._fixture_defs: Dict[str, FixtureDef[Any]] = {} + fixtureinfo: FuncFixtureInfo = pyfuncitem._fixtureinfo + self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() + self._arg2index: Dict[str, int] = {} + self._fixturemanager: FixtureManager = pyfuncitem.session._fixturemanager + # Notes on the type of `param`: + # -`request.param` is only defined in parametrized fixtures, and will raise + # AttributeError otherwise. Python typing has no notion of "undefined", so + # this cannot be reflected in the type. + # - Technically `param` is only (possibly) defined on SubRequest, not + # FixtureRequest, but the typing of that is still in flux so this cheats. + # - In the future we might consider using a generic for the param type, but + # for now just using Any. + self.param: Any + + @property + def scope(self) -> "_ScopeName": + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @property + def fixturenames(self) -> List[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem._fixtureinfo.names_closure) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + def node(self): + """Underlying collection node (depends on current request scope).""" + scope = self._scope + if scope is Scope.Function: + # This might also be a non-function Item despite its attribute name. + node: Optional[Union[nodes.Item, nodes.Collector]] = self._pyfuncitem + elif scope is Scope.Package: + # FIXME: _fixturedef is not defined on FixtureRequest (this class), + # but on FixtureRequest (a subclass). + node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined] + else: + node = get_scope_node(self._pyfuncitem, scope) + if node is None and scope is Scope.Class: + # Fallback to function item itself. 
+ node = self._pyfuncitem + assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format( + scope, self._pyfuncitem + ) + return node + + def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]": + fixturedefs = self._arg2fixturedefs.get(argname, None) + if fixturedefs is None: + # We arrive here because of a dynamic call to + # getfixturevalue(argname) usage which was naturally + # not known at parsing/collection time. + assert self._pyfuncitem.parent is not None + parentid = self._pyfuncitem.parent.nodeid + fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) + # TODO: Fix this type ignore. Either add assert or adjust types. + # Can this be None here? + self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment] + # fixturedefs list is immutable so we maintain a decreasing index. + index = self._arg2index.get(argname, 0) - 1 + if fixturedefs is None or (-index > len(fixturedefs)): + raise FixtureLookupError(argname, self) + self._arg2index[argname] = index + return fixturedefs[index] + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config # type: ignore[no-any-return] + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + # unittest support hack, see _pytest.unittest.TestCaseFunction. + try: + return self._pyfuncitem._testcase + except AttributeError: + function = getattr(self, "function", None) + return getattr(function, "__self__", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + return self._pyfuncitem.getparent(_pytest.python.Module).obj + + @property + def path(self) -> Path: + """Path where the test function was collected.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + # TODO: Remove ignore once _pyfuncitem is properly typed. + return self._pyfuncitem.path # type: ignore + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> "Session": + """Pytest session object.""" + return self._pyfuncitem.session # type: ignore[no-any-return] + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called without arguments after + the last test within the requesting test context finished execution.""" + # XXX usually this method is shadowed by fixturedef specific ones. + self.node.addfinalizer(finalizer) + + def applymarker(self, marker: Union[str, MarkDecorator]) -> None: + """Apply a marker to a single test function invocation. 
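+        A minimal sketch (the test name is hypothetical):
+
+        .. code-block:: python
+
+            def test_example(request):
+                request.applymarker(pytest.mark.xfail(reason="known issue"))
+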
+ + This method is useful if you don't want to have a keyword/marker + on all function invocations. + + :param marker: + An object created by a call to ``pytest.mark.NAME(...)``. + """ + self.node.add_marker(marker) + + def raiseerror(self, msg: Optional[str]) -> NoReturn: + """Raise a FixtureLookupError exception. + + :param msg: + An optional custom error message. + """ + raise self._fixturemanager.FixtureLookupError(None, self, msg) + + def _fillfixtures(self) -> None: + item = self._pyfuncitem + fixturenames = getattr(item, "fixturenames", self.fixturenames) + for argname in fixturenames: + if argname not in item.funcargs: + item.funcargs[argname] = self.getfixturevalue(argname) + + def getfixturevalue(self, argname: str) -> Any: + """Dynamically run a named fixture function. + + Declaring fixtures via function argument is recommended where possible. + But if you can only decide whether to use another fixture at test + setup time, you may use this function to retrieve it inside a fixture + or test function body. + + This method can be used during the test setup phase or the test run + phase, but during the test teardown phase a fixture's value may not + be available. + + :param argname: + The fixture name. + :raises pytest.FixtureLookupError: + If the given fixture could not be found. + """ + fixturedef = self._get_active_fixturedef(argname) + assert fixturedef.cached_result is not None, ( + f'The fixture value for "{argname}" is not available. ' + "This can happen when the fixture has already been torn down." + ) + return fixturedef.cached_result[0] + + def _get_active_fixturedef( + self, argname: str + ) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]: + try: + return self._fixture_defs[argname] + except KeyError: + try: + fixturedef = self._getnextfixturedef(argname) + except FixtureLookupError: + if argname == "request": + cached_result = (self, [0], None) + return PseudoFixtureDef(cached_result, Scope.Function) + raise + # Remove indent to prevent the python3 exception + # from leaking into the call. + self._compute_fixture_value(fixturedef) + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _get_fixturestack(self) -> List["FixtureDef[Any]"]: + current = self + values: List[FixtureDef[Any]] = [] + while isinstance(current, SubRequest): + values.append(current._fixturedef) # type: ignore[has-type] + current = current._parent_request + values.reverse() + return values + + def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None: + """Create a SubRequest based on "self" and call the execute method + of the given FixtureDef object. + + This will force the FixtureDef object to throw away any previous + results and compute a new fixture value, which will be stored into + the FixtureDef object itself. + """ + # prepare a subrequest object before calling fixture function + # (latter managed by fixturedef) + argname = fixturedef.argname + funcitem = self._pyfuncitem + scope = fixturedef._scope + try: + callspec = funcitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # If a parametrize invocation set a scope it will override + # the static scope defined with the fixture function. 
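+            # Illustrative sketch (fixture name is hypothetical): a test-side
+            #
+            #     @pytest.mark.parametrize("db", ["sqlite"], indirect=True, scope="session")
+            #
+            # records "db" -> Scope.Session in callspec._arg2scope, which the
+            # lookup below picks up.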
+ with suppress(KeyError): + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" + "Node id: {nodeid}\n" + "Function type: {typename}" + ).format( + name=funcitem.name, + nodeid=funcitem.nodeid, + typename=type(funcitem).__name__, + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str( + source_path.relative_to(funcitem.config.rootpath) + ) + except ValueError: + source_path_str = str(source_path) + msg = ( + "The requested fixture has no parameter defined for test:\n" + " {}\n\n" + "Requested fixture '{}' defined in:\n{}" + "\n\nRequested here:\n{}:{}".format( + funcitem.nodeid, + fixturedef.argname, + getlocation(fixturedef.func, funcitem.config.rootpath), + source_path_str, + source_lineno, + ) + ) + fail(msg, pytrace=False) + + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Check if a higher-level scoped fixture accesses a lower level one. + subrequest._check_scope(argname, self._scope, scope) + try: + # Call the fixture function. + fixturedef.execute(request=subrequest) + finally: + self._schedule_finalizers(fixturedef, subrequest) + + def _schedule_finalizers( + self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest" + ) -> None: + # If fixture function failed it might have registered finalizers. + subrequest.node.addfinalizer(lambda: fixturedef.finish(request=subrequest)) + + def _check_scope( + self, + argname: str, + invoking_scope: Scope, + requested_scope: Scope, + ) -> None: + if argname == "request": + return + if invoking_scope > requested_scope: + # Try to report something helpful. 
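+            # A minimal failing sketch (fixture names are hypothetical): a
+            # module-scoped fixture requesting the function-scoped tmp_path
+            # ends up here:
+            #
+            #     @pytest.fixture(scope="module")
+            #     def workdir(tmp_path):   # ScopeMismatch is raised at setup
+            #         return tmp_path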
+            text = "\n".join(self._factorytraceback())
+            fail(
+                f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
+                f"fixture {argname} with a {invoking_scope.value} scoped request object, "
+                f"involved factories:\n{text}",
+                pytrace=False,
+            )
+
+    def _factorytraceback(self) -> List[str]:
+        lines = []
+        for fixturedef in self._get_fixturestack():
+            factory = fixturedef.func
+            fs, lineno = getfslineno(factory)
+            if isinstance(fs, Path):
+                session: Session = self._pyfuncitem.session
+                p = bestrelpath(session.path, fs)
+            else:
+                p = fs
+            args = _format_args(factory)
+            lines.append("%s:%d:  def %s%s" % (p, lineno + 1, factory.__name__, args))
+        return lines
+
+    def __repr__(self) -> str:
+        return "<FixtureRequest for %r>" % (self.node)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """A sub request for handling getting a fixture from a test function/fixture."""
+
+    def __init__(
+        self,
+        request: "FixtureRequest",
+        scope: Scope,
+        param: Any,
+        param_index: int,
+        fixturedef: "FixtureDef[object]",
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self._scope = scope
+        self._fixturedef = fixturedef
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        """Add finalizer/teardown function to be called without arguments after
+        the last test within the requesting test context finished execution."""
+        self._fixturedef.addfinalizer(finalizer)
+
+    def _schedule_finalizers(
+        self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
+    ) -> None:
+        # If the executing fixturedef was not explicitly requested in the argument list (via
+        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
+        # first.
+        if fixturedef.argname not in self.fixturenames:
+            fixturedef.addfinalizer(
+                functools.partial(self._fixturedef.finish, request=self)
+            )
+        super()._schedule_finalizers(fixturedef, subrequest)
+
+
+@final
+class FixtureLookupError(LookupError):
+    """Could not return a requested fixture (missing or invalid)."""
+
+    def __init__(
+        self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
+    ) -> None:
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self) -> "FixtureLookupErrorRepr":
+        tblines: List[str] = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # The last fixture raised an error, let's present
+            # it at the requesting side.
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (OSError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno + 1))
+            else:
+                addline(f"file {fspath}, line {lineno + 1}")
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith("def"):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = set()
+            parentid = self.request._pyfuncitem.parent.nodeid
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parentid))
+                if faclist:
+                    available.add(name)
+            if self.argname in available:
+                msg = " recursive dependency involving fixture '{}' detected".format(
+                    self.argname
+                )
+            else:
+                msg = f"fixture '{self.argname}' not found"
+            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+
+        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
+
+
+class FixtureLookupErrorRepr(TerminalRepr):
+    def __init__(
+        self,
+        filename: Union[str, "os.PathLike[str]"],
+        firstlineno: int,
+        tblines: Sequence[str],
+        errorstring: str,
+        argname: Optional[str],
+    ) -> None:
+        self.tblines = tblines
+        self.errorstring = errorstring
+        self.filename = filename
+        self.firstlineno = firstlineno
+        self.argname = argname
+
+    def toterminal(self, tw: TerminalWriter) -> None:
+        # tw.line("FixtureLookupError: %s" % (self.argname), red=True)
+        for tbline in self.tblines:
+            tw.line(tbline.rstrip())
+        lines = self.errorstring.split("\n")
+        if lines:
+            tw.line(
+                f"{FormattedExcinfo.fail_marker}       {lines[0].strip()}",
+                red=True,
+            )
+            for line in lines[1:]:
+                tw.line(
+                    f"{FormattedExcinfo.flow_marker}       {line.strip()}",
+                    red=True,
+                )
+        tw.line()
+        tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1))
+
+
+def fail_fixturefunc(fixturefunc, msg: str) -> NoReturn:
+    fs, lineno = getfslineno(fixturefunc)
+    location = f"{fs}:{lineno + 1}"
+    source = _pytest._code.Source(fixturefunc)
+    fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
+
+
+def call_fixture_func(
+    fixturefunc: "_FixtureFunc[FixtureValue]", request: FixtureRequest, kwargs
+) -> FixtureValue:
+    if is_generator(fixturefunc):
+        fixturefunc = cast(
+            Callable[..., Generator[FixtureValue, None, None]], fixturefunc
+        )
+        generator = fixturefunc(**kwargs)
+        try:
+            fixture_result = next(generator)
+        except StopIteration:
+            raise ValueError(f"{request.fixturename} did not yield a value") from None
+        finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
+        request.addfinalizer(finalizer)
+    else:
+        fixturefunc = cast(Callable[..., FixtureValue], fixturefunc)
+        fixture_result = fixturefunc(**kwargs)
+    return fixture_result
+
+
+def _teardown_yield_fixture(fixturefunc, it) -> None:
+    """Execute the teardown of a fixture function by advancing the iterator
+    after the yield and ensure the iteration ends (if not it means there is
+    more than one yield in the function)."""
+    try:
+        next(it)
+    except StopIteration:
+        pass
+    else:
+        fail_fixturefunc(fixturefunc, "fixture function has more than one 'yield'")
+
+
+def _eval_scope_callable(
+    scope_callable: "Callable[[str, Config], _ScopeName]",
+    fixture_name: str,
+    config: Config,
+) -> "_ScopeName":
+    try:
+        # Type ignored because there is no typing mechanism to specify
+        # keyword arguments, currently.
+        result = scope_callable(fixture_name=fixture_name, config=config)  # type: ignore[call-arg]
+    except Exception as e:
+        raise TypeError(
+            "Error evaluating {} while defining fixture '{}'.\n"
+            "Expected a function with the signature (*, fixture_name, config)".format(
+                scope_callable, fixture_name
+            )
+        ) from e
+    if not isinstance(result, str):
+        fail(
+            "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"
+            "{!r}".format(scope_callable, fixture_name, result),
+            pytrace=False,
+        )
+    return result
+
+
+@final
+class FixtureDef(Generic[FixtureValue]):
+    """A container for a fixture definition."""
+
+    def __init__(
+        self,
+        fixturemanager: "FixtureManager",
+        baseid: Optional[str],
+        argname: str,
+        func: "_FixtureFunc[FixtureValue]",
+        scope: Union[Scope, "_ScopeName", Callable[[str, Config], "_ScopeName"], None],
+        params: Optional[Sequence[object]],
+        unittest: bool = False,
+        ids: Optional[
+            Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]
+        ] = None,
+    ) -> None:
+        self._fixturemanager = fixturemanager
+        # The "base" node ID for the fixture.
+        #
+        # This is a node ID prefix. A fixture is only available to a node (e.g.
+        # a `Function` item) if the fixture's baseid is a parent of the node's
+        # nodeid (see the `iterparentnodeids` function for what constitutes a
+        # "parent" and a "prefix" in this context).
+        #
+        # For a fixture found in a Collector's object (e.g. a `Module`s module,
+        # a `Class`'s class), the baseid is the Collector's nodeid.
+        #
+        # For a fixture found in a conftest plugin, the baseid is the conftest's
+        # directory path relative to the rootdir.
+        #
+        # For other plugins, the baseid is the empty string (always matches).
+        self.baseid = baseid or ""
+        # Whether the fixture was found from a node or a conftest in the
+        # collection tree. Will be false for fixtures defined in non-conftest
+        # plugins.
+        self.has_location = baseid is not None
+        # The fixture factory function.
+        self.func = func
+        # The name by which the fixture may be requested.
+        self.argname = argname
+        if scope is None:
+            scope = Scope.Function
+        elif callable(scope):
+            scope = _eval_scope_callable(scope, argname, fixturemanager.config)
+        if isinstance(scope, str):
+            scope = Scope.from_user(
+                scope, descr=f"Fixture '{func.__name__}'", where=baseid
+            )
+        self._scope = scope
+        # If the fixture is directly parametrized, the parameter values.
+        self.params: Optional[Sequence[object]] = params
+        # If the fixture is directly parametrized, a tuple of explicit IDs to
+        # assign to the parameter values, or a callable to generate an ID given
+        # a parameter value.
+        self.ids = ids
+        # The names requested by the fixtures.
+        self.argnames = getfuncargnames(func, name=argname, is_method=unittest)
+        # Whether the fixture was collected from a unittest TestCase class.
+        # Note that it really only makes sense to define autouse fixtures in
+        # unittest TestCases.
+        self.unittest = unittest
+        # If the fixture was executed, the current value of the fixture.
+        # Can change if the fixture is executed with different parameters.
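+        # Shape of the cache, per _FixtureCachedResult above: a 3-tuple of
+        # (value, cache_key, None) on success, or (None, cache_key, exc_info)
+        # when the fixture function raised.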
+        self.cached_result: Optional[_FixtureCachedResult[FixtureValue]] = None
+        self._finalizers: List[Callable[[], object]] = []
+
+    @property
+    def scope(self) -> "_ScopeName":
+        """Scope string, one of "function", "class", "module", "package", "session"."""
+        return self._scope.value
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self._finalizers.append(finalizer)
+
+    def finish(self, request: SubRequest) -> None:
+        exc = None
+        try:
+            while self._finalizers:
+                try:
+                    func = self._finalizers.pop()
+                    func()
+                except BaseException as e:
+                    # XXX Only first exception will be seen by user,
+                    #     ideally all should be reported.
+                    if exc is None:
+                        exc = e
+            if exc:
+                raise exc
+        finally:
+            ihook = request.node.ihook
+            ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
+            # Even if finalization fails, we invalidate the cached fixture
+            # value and remove all finalizers because they may be bound methods
+            # which will keep instances alive.
+            self.cached_result = None
+            self._finalizers = []
+
+    def execute(self, request: SubRequest) -> FixtureValue:
+        # Get required arguments and register our own finish()
+        # with their finalization.
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            if argname != "request":
+                # PseudoFixtureDef is only for "request".
+                assert isinstance(fixturedef, FixtureDef)
+                fixturedef.addfinalizer(functools.partial(self.finish, request=request))
+
+        my_cache_key = self.cache_key(request)
+        if self.cached_result is not None:
+            # note: comparison with `==` can fail (or be expensive) for e.g.
+            # numpy arrays (#6497).
+            cache_key = self.cached_result[1]
+            if my_cache_key is cache_key:
+                if self.cached_result[2] is not None:
+                    _, val, tb = self.cached_result[2]
+                    raise val.with_traceback(tb)
+                else:
+                    result = self.cached_result[0]
+                    return result
+            # We have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one.
+            self.finish(request)
+            assert self.cached_result is None
+
+        ihook = request.node.ihook
+        result = ihook.pytest_fixture_setup(fixturedef=self, request=request)
+        return result
+
+    def cache_key(self, request: SubRequest) -> object:
+        return request.param_index if not hasattr(request, "param") else request.param
+
+    def __repr__(self) -> str:
+        return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
+            self.argname, self.scope, self.baseid
+        )
+
+
+def resolve_fixture_function(
+    fixturedef: FixtureDef[FixtureValue], request: FixtureRequest
+) -> "_FixtureFunc[FixtureValue]":
+    """Get the actual callable that can be called to obtain the fixture
+    value, dealing with unittest-specific instances and bound methods."""
+    fixturefunc = fixturedef.func
+    if fixturedef.unittest:
+        if request.instance is not None:
+            # Bind the unbound method to the TestCase instance.
+            fixturefunc = fixturedef.func.__get__(request.instance)  # type: ignore[union-attr]
+    else:
+        # The fixture function needs to be bound to the actual
+        # request.instance so that code working with "fixturedef" behaves
+        # as expected.
+        if request.instance is not None:
+            # Handle the case where fixture is defined not in a test class, but some other class
+            # (for example a plugin class with a fixture), see #2270.
+ if hasattr(fixturefunc, "__self__") and not isinstance( + request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr] + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(request.instance) # type: ignore[union-attr] + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + fixdef = request._get_active_fixturedef(argname) + assert fixdef.cached_result is not None + result, arg_cache_key, exc = fixdef.cached_result + request._check_scope(argname, request._scope, fixdef._scope) + kwargs[argname] = result + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME: + exc_info = sys.exc_info() + assert exc_info[0] is not None + if isinstance( + exc_info[1], skip.Exception + ) and not fixturefunc.__name__.startswith("xunit_setup"): + exc_info[1]._use_item_location = True # type: ignore[attr-defined] + fixturedef.cached_result = (None, my_cache_key, exc_info) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def _ensure_immutable_ids( + ids: Optional[Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]]] +) -> Optional[Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]]]: + if ids is None: + return None + if callable(ids): + return ids + return tuple(ids) + + +def _params_converter( + params: Optional[Iterable[object]], +) -> Optional[Tuple[object, ...]]: + return tuple(params) if params is not None else None + + +def wrap_function_to_error_out_if_called_directly( + function: FixtureFunction, + fixture_marker: "FixtureFunctionMarker", +) -> FixtureFunction: + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function.""" + message = ( + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." + ).format(name=fixture_marker.name or function.__name__) + + @functools.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # Keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774). 
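+    # Illustrative sketch of the resulting behaviour (the fixture name is
+    # hypothetical):
+    #
+    #     @pytest.fixture
+    #     def items():
+    #         return [1, 2]
+    #
+    #     items()   # fails: 'Fixture "items" called directly. ...'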
+ result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] + + return cast(FixtureFunction, result) + + +@final +@dataclasses.dataclass(frozen=True) +class FixtureFunctionMarker: + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" + params: Optional[Tuple[object, ...]] + autouse: bool = False + ids: Optional[ + Union[Tuple[Optional[object], ...], Callable[[Any], Optional[object]]] + ] = None + name: Optional[str] = None + + _ispytest: dataclasses.InitVar[bool] = False + + def __post_init__(self, _ispytest: bool) -> None: + check_ispytest(_ispytest) + + def __call__(self, function: FixtureFunction) -> FixtureFunction: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + "fixture is being applied more than once to the same function" + ) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + "'request' is a reserved word for fixtures, use another name:\n {}".format( + location + ), + pytrace=False, + ) + + # Type ignored because https://github.com/python/mypy/issues/2087. + function._pytestfixturefunction = self # type: ignore[attr-defined] + return function + + +@overload +def fixture( + fixture_function: FixtureFunction, + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = ..., + name: Optional[str] = ..., +) -> FixtureFunction: + ... + + +@overload +def fixture( # noqa: F811 + fixture_function: None = ..., + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = ..., + params: Optional[Iterable[object]] = ..., + autouse: bool = ..., + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = ..., + name: Optional[str] = None, +) -> FixtureFunctionMarker: + ... + + +def fixture( # noqa: F811 + fixture_function: Optional[FixtureFunction] = None, + *, + scope: "Union[_ScopeName, Callable[[str, Config], _ScopeName]]" = "function", + params: Optional[Iterable[object]] = None, + autouse: bool = False, + ids: Optional[ + Union[Sequence[Optional[object]], Callable[[Any], Optional[object]]] + ] = None, + name: Optional[str] = None, +) -> Union[FixtureFunctionMarker, FixtureFunction]: + """Decorator to mark a fixture factory function. + + This decorator can be used, with or without parameters, to define a + fixture function. + + The name of the fixture function can later be referenced to cause its + invocation ahead of running tests: test modules or classes can use the + ``pytest.mark.usefixtures(fixturename)`` marker. + + Test functions can directly use fixture names as input arguments in which + case the fixture instance returned from the fixture function will be + injected. + + Fixtures can provide their values to test functions using ``return`` or + ``yield`` statements. When using ``yield`` the code block after the + ``yield`` statement is executed as teardown code regardless of the test + outcome, and must yield exactly once. + + :param scope: + The scope for which this fixture is shared; one of ``"function"`` + (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``. 
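+
+        A minimal sketch (``make_connection`` is a hypothetical helper)::
+
+            @pytest.fixture(scope="module")
+            def connection():
+                conn = make_connection()
+                yield conn
+                conn.close()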
+
+        This parameter may also be a callable which receives ``(fixture_name, config)``
+        as parameters, and must return a ``str`` with one of the values mentioned above.
+
+        See :ref:`dynamic scope` in the docs for more information.
+
+    :param params:
+        An optional list of parameters which will cause multiple invocations
+        of the fixture function and all of the tests using it. The current
+        parameter is available in ``request.param``.
+
+    :param autouse:
+        If True, the fixture func is activated for all tests that can see it.
+        If False (the default), an explicit reference is needed to activate
+        the fixture.
+
+    :param ids:
+        Sequence of ids each corresponding to the params so that they are
+        part of the test id. If no ids are provided they will be generated
+        automatically from the params.
+
+    :param name:
+        The name of the fixture. This defaults to the name of the decorated
+        function. If a fixture is used in the same module in which it is
+        defined, the function name of the fixture will be shadowed by the
+        function arg that requests the fixture; one way to resolve this is to
+        name the decorated function ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    fixture_marker = FixtureFunctionMarker(
+        scope=scope,
+        params=tuple(params) if params is not None else None,
+        autouse=autouse,
+        ids=None if ids is None else ids if callable(ids) else tuple(ids),
+        name=name,
+        _ispytest=True,
+    )
+
+    # Direct decoration.
+    if fixture_function:
+        return fixture_marker(fixture_function)
+
+    return fixture_marker
+
+
+def yield_fixture(
+    fixture_function=None,
+    *args,
+    scope="function",
+    params=None,
+    autouse=False,
+    ids=None,
+    name=None,
+):
+    """(Return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    warnings.warn(YIELD_FIXTURE, stacklevel=2)
+    return fixture(
+        fixture_function,
+        *args,
+        scope=scope,
+        params=params,
+        autouse=autouse,
+        ids=ids,
+        name=name,
+    )
+
+
+@fixture(scope="session")
+def pytestconfig(request: FixtureRequest) -> Config:
+    """Session-scoped fixture that returns the session's :class:`pytest.Config`
+    object.
+
+    Example::
+
+        def test_foo(pytestconfig):
+            if pytestconfig.getoption("verbose") > 0:
+                ...
+
+    """
+    return request.config
+
+
+def pytest_addoption(parser: Parser) -> None:
+    parser.addini(
+        "usefixtures",
+        type="args",
+        default=[],
+        help="List of default fixtures to be used with this project",
+    )
+
+
+class FixtureManager:
+    """pytest fixture definitions and information is stored and managed
+    from this class.
+
+    During collection fm.parsefactories() is called multiple times to parse
+    fixture function definitions into FixtureDef objects and internal
+    data structures.
+
+    During collection of test functions, metafunc-mechanics instantiate
+    a FuncFixtureInfo object which is cached per node/func-name.
+    This FuncFixtureInfo object is later retrieved by Function nodes
+    which themselves offer a fixturenames attribute.
+
+    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+    relevant for a particular function. An initial list of fixtures is
+    assembled like this:
+
+    - ini-defined usefixtures
+    - autouse-marked fixtures along the collection chain up from the function
+    - usefixtures markers at module/class/function level
+    - test function funcargs
+
+    Subsequently the funcfixtureinfo.fixturenames attribute is computed
+    as the closure of the fixtures needed to setup the initial fixtures,
+    i.e. fixtures needed by fixture functions themselves are appended
+    to the fixturenames list.
+
+    Upon the test-setup phases all fixturenames are instantiated, retrieved
+    by a lookup of their FuncFixtureInfo.
+    """
+
+    FixtureLookupError = FixtureLookupError
+    FixtureLookupErrorRepr = FixtureLookupErrorRepr
+
+    def __init__(self, session: "Session") -> None:
+        self.session = session
+        self.config: Config = session.config
+        self._arg2fixturedefs: Dict[str, List[FixtureDef[Any]]] = {}
+        self._holderobjseen: Set[object] = set()
+        # A mapping from a nodeid to a list of autouse fixtures it defines.
+        self._nodeid_autousenames: Dict[str, List[str]] = {
+            "": self.config.getini("usefixtures"),
+        }
+        session.config.pluginmanager.register(self, "funcmanage")
+
+    def _get_direct_parametrize_args(self, node: nodes.Node) -> List[str]:
+        """Return all direct parametrization arguments of a node, so we don't
+        mistake them for fixtures.
+
+        Check https://github.com/pytest-dev/pytest/issues/5036.
+
+        These things are done later as well when dealing with parametrization
+        so this could be improved.
+        """
+        parametrize_argnames: List[str] = []
+        for marker in node.iter_markers(name="parametrize"):
+            if not marker.kwargs.get("indirect", False):
+                p_argnames, _ = ParameterSet._parse_parametrize_args(
+                    *marker.args, **marker.kwargs
+                )
+                parametrize_argnames.extend(p_argnames)
+
+        return parametrize_argnames
+
+    def getfixtureinfo(
+        self, node: nodes.Node, func, cls, funcargs: bool = True
+    ) -> FuncFixtureInfo:
+        if funcargs and not getattr(node, "nofuncargs", False):
+            argnames = getfuncargnames(func, name=node.name, cls=cls)
+        else:
+            argnames = ()
+
+        usefixtures = tuple(
+            arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args
+        )
+        initialnames = usefixtures + argnames
+        fm = node.session._fixturemanager
+        initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
+            initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
+        )
+        return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
+
+    def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
+        nodeid = None
+        try:
+            p = absolutepath(plugin.__file__)  # type: ignore[attr-defined]
+        except AttributeError:
+            pass
+        else:
+            # Construct the base nodeid which is later used to check
+            # what fixtures are visible for particular tests (as denoted
+            # by their test id).
+            if p.name.startswith("conftest.py"):
+                try:
+                    nodeid = str(p.parent.relative_to(self.config.rootpath))
+                except ValueError:
+                    nodeid = ""
+                if nodeid == ".":
+                    nodeid = ""
+                if os.sep != nodes.SEP:
+                    nodeid = nodeid.replace(os.sep, nodes.SEP)
+
+        self.parsefactories(plugin, nodeid)
+
+    def _getautousenames(self, nodeid: str) -> Iterator[str]:
+        """Return the names of autouse fixtures applicable to nodeid."""
+        for parentnodeid in nodes.iterparentnodeids(nodeid):
+            basenames = self._nodeid_autousenames.get(parentnodeid)
+            if basenames:
+                yield from basenames
+
+    def getfixtureclosure(
+        self,
+        fixturenames: Tuple[str, ...],
+        parentnode: nodes.Node,
+        ignore_args: Sequence[str] = (),
+    ) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]:
+        # Collect the closure of all fixtures, starting with the given
+        # fixturenames as the initial set.  As we have to visit all
+        # factory definitions anyway, we also return an arg2fixturedefs
+        # mapping so that the caller can reuse it and does not have
+        # to re-discover fixturedefs again for each fixturename
+        # (discovering matching fixtures for a given name/node is expensive).
+
+        parentid = parentnode.nodeid
+        fixturenames_closure = list(self._getautousenames(parentid))
+
+        def merge(otherlist: Iterable[str]) -> None:
+            for arg in otherlist:
+                if arg not in fixturenames_closure:
+                    fixturenames_closure.append(arg)
+
+        merge(fixturenames)
+
+        # At this point, fixturenames_closure contains what we call "initialnames",
+        # which is a set of fixturenames the function immediately requests. We
+        # need to return it as well, so save this.
+        initialnames = tuple(fixturenames_closure)
+
+        arg2fixturedefs: Dict[str, Sequence[FixtureDef[Any]]] = {}
+        lastlen = -1
+        while lastlen != len(fixturenames_closure):
+            lastlen = len(fixturenames_closure)
+            for argname in fixturenames_closure:
+                if argname in ignore_args:
+                    continue
+                if argname in arg2fixturedefs:
+                    continue
+                fixturedefs = self.getfixturedefs(argname, parentid)
+                if fixturedefs:
+                    arg2fixturedefs[argname] = fixturedefs
+                    merge(fixturedefs[-1].argnames)
+
+        def sort_by_scope(arg_name: str) -> Scope:
+            try:
+                fixturedefs = arg2fixturedefs[arg_name]
+            except KeyError:
+                return Scope.Function
+            else:
+                return fixturedefs[-1]._scope
+
+        fixturenames_closure.sort(key=sort_by_scope, reverse=True)
+        return initialnames, fixturenames_closure, arg2fixturedefs
+
+    def pytest_generate_tests(self, metafunc: "Metafunc") -> None:
+        """Generate new tests based on parametrized fixtures used by the given metafunc"""
+
+        def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]:
+            args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs)
+            return args
+
+        for argname in metafunc.fixturenames:
+            # Get the FixtureDefs for the argname.
+            fixture_defs = metafunc._arg2fixturedefs.get(argname)
+            if not fixture_defs:
+                # Will raise FixtureLookupError at setup time if not parametrized somewhere
+                # else (e.g. @pytest.mark.parametrize)
+                continue
+
+            # If the test itself parametrizes using this argname, give it
+            # precedence.
+            if any(
+                argname in get_parametrize_mark_argnames(mark)
+                for mark in metafunc.definition.iter_markers("parametrize")
+            ):
+                continue
+
+            # In the common case we only look at the fixture def with the
+            # closest scope (last in the list). But if the fixture overrides
+            # another fixture, while requesting the super fixture, keep going
+            # in case the super fixture is parametrized (#1953).
+            for fixturedef in reversed(fixture_defs):
+                # Fixture is parametrized, apply it and stop.
+                if fixturedef.params is not None:
+                    metafunc.parametrize(
+                        argname,
+                        fixturedef.params,
+                        indirect=True,
+                        scope=fixturedef.scope,
+                        ids=fixturedef.ids,
+                    )
+                    break
+
+                # Not requesting the overridden super fixture, stop.
+                if argname not in fixturedef.argnames:
+                    break
+
+                # Try next super fixture, if any.
+
+    def pytest_collection_modifyitems(self, items: List[nodes.Item]) -> None:
+        # Separate parametrized setups.
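+        # Illustrative effect (test and parameter names are hypothetical):
+        # with a session-scoped parametrized fixture db in {pg, lite}, an
+        # order like [t1[pg], t2[lite], t3[pg]] is regrouped as
+        # [t1[pg], t3[pg], t2[lite]] so each db value is set up and torn
+        # down only once.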
+ items[:] = reorder_items(items) + + @overload + def parsefactories( + self, + node_or_obj: nodes.Node, + *, + unittest: bool = ..., + ) -> None: + raise NotImplementedError() + + @overload + def parsefactories( # noqa: F811 + self, + node_or_obj: object, + nodeid: Optional[str], + *, + unittest: bool = ..., + ) -> None: + raise NotImplementedError() + + def parsefactories( # noqa: F811 + self, + node_or_obj: Union[nodes.Node, object], + nodeid: Union[str, NotSetType, None] = NOTSET, + *, + unittest: bool = False, + ) -> None: + """Collect fixtures from a collection node or object. + + Found fixtures are parsed into `FixtureDef`s and saved. + + If `node_or_object` is a collection node (with an underlying Python + object), the node's object is traversed and the node's nodeid is used to + determine the fixtures' visibilty. `nodeid` must not be specified in + this case. + + If `node_or_object` is an object (e.g. a plugin), the object is + traversed and the given `nodeid` is used to determine the fixtures' + visibility. `nodeid` must be specified in this case; None and "" mean + total visibility. + """ + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + assert isinstance(node_or_obj, nodes.Node) + holderobj = cast(object, node_or_obj.obj) # type: ignore[attr-defined] + assert isinstance(node_or_obj.nodeid, str) + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + self._holderobjseen.add(holderobj) + autousenames = [] + for name in dir(holderobj): + # ugly workaround for one of the fspath deprecated property of node + # todo: safely generalize + if isinstance(holderobj, nodes.Node) and name == "fspath": + continue + + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. + obj = safe_getattr(holderobj, name, None) + marker = getfixturemarker(obj) + if not isinstance(marker, FixtureFunctionMarker): + # Magic globals with __getattr__ might have got us a wrong + # fixture attribute. + continue + + if marker.name: + name = marker.name + + # During fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in + # order to not emit the warning when pytest itself calls the + # fixture function. + obj = get_real_method(obj, holderobj) + + fixture_def = FixtureDef( + fixturemanager=self, + baseid=nodeid, + argname=name, + func=obj, + scope=marker.scope, + params=marker.params, + unittest=unittest, + ids=marker.ids, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if marker.autouse: + autousenames.append(name) + + if autousenames: + self._nodeid_autousenames.setdefault(nodeid or "", []).extend(autousenames) + + def getfixturedefs( + self, argname: str, nodeid: str + ) -> Optional[Sequence[FixtureDef[Any]]]: + """Get a list of fixtures which are applicable to the given node id. + + :param str argname: Name of the fixture to search for. + :param str nodeid: Full node id of the requesting test. 
+ :rtype: Sequence[FixtureDef] + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, nodeid)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = set(nodes.iterparentnodeids(nodeid)) + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef diff --git a/venv/lib/python3.12/site-packages/_pytest/freeze_support.py b/venv/lib/python3.12/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000000..9f8ea231fe --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/freeze_support.py @@ -0,0 +1,44 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" +import types +from typing import Iterator +from typing import List +from typing import Union + + +def freeze_includes() -> List[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: Union[str, types.ModuleType], + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. + + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ # type: ignore[attr-defined] + path, prefix = package_path[0], package.__name__ + "." + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/venv/lib/python3.12/site-packages/_pytest/helpconfig.py b/venv/lib/python3.12/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000000..ea16c43882 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/helpconfig.py @@ -0,0 +1,270 @@ +"""Version info, help messages, tracing configuration.""" +import os +import sys +from argparse import Action +from typing import List +from typing import Optional +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser +from _pytest.terminal import TerminalReporter + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. 
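+        # Sketch of the intended flow (the attribute name is taken from the
+        # check below): during the early "preparse" pass --help is merely
+        # recorded; once after_preparse is set, PrintHelp aborts argument
+        # parsing so plugin-added required arguments cannot trigger argparse
+        # errors.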
+ if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="Display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="Show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="Early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="Trace considerations of conftest.py files", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="Store internal tracing debug information in this log file. " + "This file is opened with 'w' and truncated as a result, care advised. " + "Default: pytestdebug.log.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='Override ini option with "option=value" style, ' + "e.g. `-o xfail_strict=True -o cache_dir=cache`.", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_cmdline_parse(): + outcome = yield + config: Config = outcome.get_result() + + if config.option.debug: + # --debug | --debug was provided. + path = config.option.debug + debugfile = open(path, "w", encoding="utf-8") + debugfile.write( + "versions pytest-%s, " + "python-%s\ncwd=%s\nargs=%s\n\n" + % ( + pytest.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write("writing pytest debug information to %s\n" % path) + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name) + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + +def showversion(config: Config) -> None: + if config.option.version > 1: + sys.stdout.write( + "This is pytest version {}, imported from {}\n".format( + pytest.__version__, pytest.__file__ + ) + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") + else: + sys.stdout.write(f"pytest {pytest.__version__}\n") + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.version > 0: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter: Optional[TerminalReporter] = config.pluginmanager.get_plugin( + "terminalreporter" + ) + assert reporter is not None + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] ini-options in the first " + "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._ininames: + help, type, 
default = config._parser._inidict[name] + if type is None: + type = "string" + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(" %s" % spec) + spec_len = len(spec) + if spec_len > (indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. + tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("Environment variables:") + vars = [ + ("PYTEST_ADDOPTS", "Extra command line options"), + ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + return + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> List[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("setuptools registered plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> List[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/venv/lib/python3.12/site-packages/_pytest/hookspec.py b/venv/lib/python3.12/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000000..1f7c368f79 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/hookspec.py @@ -0,0 +1,979 @@ +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" +from pathlib import Path +from typing import Any +from typing import Dict +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +from pluggy import HookspecMarker + +from _pytest.deprecated import WARNING_CMDLINE_PREPARSE_HOOK + +if TYPE_CHECKING: + import pdb + import warnings + from typing_extensions import Literal + + from _pytest._code.code import ExceptionRepr + from _pytest._code.code import 
ExceptionInfo
+    from _pytest.config import Config
+    from _pytest.config import ExitCode
+    from _pytest.config import PytestPluginManager
+    from _pytest.config import _PluggyPlugin
+    from _pytest.config.argparsing import Parser
+    from _pytest.fixtures import FixtureDef
+    from _pytest.fixtures import SubRequest
+    from _pytest.main import Session
+    from _pytest.nodes import Collector
+    from _pytest.nodes import Item
+    from _pytest.outcomes import Exit
+    from _pytest.python import Class
+    from _pytest.python import Function
+    from _pytest.python import Metafunc
+    from _pytest.python import Module
+    from _pytest.reports import CollectReport
+    from _pytest.reports import TestReport
+    from _pytest.runner import CallInfo
+    from _pytest.terminal import TerminalReporter
+    from _pytest.terminal import TestShortLogReport
+    from _pytest.compat import LEGACY_PATH
+
+
+hookspec = HookspecMarker("pytest")
+
+# -------------------------------------------------------------------------
+# Initialization hooks called for every plugin
+# -------------------------------------------------------------------------
+
+
+@hookspec(historic=True)
+def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
+    """Called at plugin registration time to allow adding new hooks via a call to
+    ``pluginmanager.add_hookspecs(module_or_class, prefix)``.
+
+    :param pytest.PytestPluginManager pluginmanager: The pytest plugin manager.
+
+    .. note::
+        This hook is incompatible with ``hookwrapper=True``.
+    """
+
+
+@hookspec(historic=True)
+def pytest_plugin_registered(
+    plugin: "_PluggyPlugin", manager: "PytestPluginManager"
+) -> None:
+    """A new pytest plugin got registered.
+
+    :param plugin: The plugin module or instance.
+    :param pytest.PytestPluginManager manager: pytest plugin manager.
+
+    .. note::
+        This hook is incompatible with ``hookwrapper=True``.
+    """
+
+
+@hookspec(historic=True)
+def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None:
+    """Register argparse-style options and ini-style config values,
+    called once at the beginning of a test run.
+
+    .. note::
+
+        This function should be implemented only in plugins or ``conftest.py``
+        files situated at the tests root directory due to how pytest
+        :ref:`discovers plugins during startup <pluginorder>`.
+
+    :param pytest.Parser parser:
+        To add command line options, call
+        :py:func:`parser.addoption(...) <pytest.Parser.addoption>`.
+        To add ini-file values call :py:func:`parser.addini(...)
+        <pytest.Parser.addini>`.
+
+    :param pytest.PytestPluginManager pluginmanager:
+        The pytest plugin manager, which can be used to install :py:func:`hookspec`'s
+        or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks
+        to change how command line options are added.
+
+    Options can later be accessed through the
+    :py:class:`config <pytest.Config>` object, respectively:
+
+    - :py:func:`config.getoption(name) <pytest.Config.getoption>` to
+      retrieve the value of a command line option.
+
+    - :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve
+      a value read from an ini-style file.
+
+    The config object is passed around on many internal objects via the ``.config``
+    attribute or can be retrieved as the ``pytestconfig`` fixture.
+
+    .. note::
+        This hook is incompatible with ``hookwrapper=True``.
+    """
+
+
+@hookspec(historic=True)
+def pytest_configure(config: "Config") -> None:
+    """Allow plugins and conftest files to perform initial configuration.
+
+    This hook is called for every plugin and initial conftest file
+    after command line options have been parsed.
+ + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :param pytest.Config config: The pytest config object. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: "PytestPluginManager", args: List[str] +) -> Optional["Config"]: + """Return an initialized :class:`~pytest.Config`, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook will only be called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pluginmanager: The pytest plugin manager. + :param args: List of arguments passed on the command line. + :returns: A pytest config object. + """ + + +@hookspec(warn_on_impl=WARNING_CMDLINE_PREPARSE_HOOK) +def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None: + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. Consider + using :hook:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param config: The pytest config object. + :param args: Arguments passed on the command line. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]: + """Called for performing the main command line action. The default + implementation will invoke the configure hooks and runtest_mainloop. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :returns: The exit code. + """ + + +def pytest_load_initial_conftests( + early_config: "Config", parser: "Parser", args: List[str] +) -> None: + """Called to implement the loading of initial conftest files ahead + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param early_config: The pytest config object. + :param args: Arguments passed on the command line. + :param parser: To add command line options. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: "Session") -> Optional[object]: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. 
``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param session: The pytest session object. + """ + + +def pytest_collection_modifyitems( + session: "Session", config: "Config", items: List["Item"] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + :param session: The pytest session object. + :param config: The pytest config object. + :param items: List of item objects. + """ + + +def pytest_collection_finish(session: "Session") -> None: + """Called after collection has been performed and modified. + + :param session: The pytest session object. + """ + + +@hookspec(firstresult=True) +def pytest_ignore_collect( + collection_path: Path, path: "LEGACY_PATH", config: "Config" +) -> Optional[bool]: + """Return True to prevent considering this path for collection. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collection_path: The path to analyze. + :param path: The path to analyze (deprecated). + :param config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + """ + + +def pytest_collect_file( + file_path: Path, path: "LEGACY_PATH", parent: "Collector" +) -> "Optional[Collector]": + """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + + The new node needs to have the specified ``parent`` as a parent. + + :param file_path: The path to analyze. + :param path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: "Collector") -> None: + """Collector starts collecting. + + :param collector: + The collector. + """ + + +def pytest_itemcollected(item: "Item") -> None: + """We just collected a test item. + + :param item: + The item. + """ + + +def pytest_collectreport(report: "CollectReport") -> None: + """Collector finished collecting. + + :param report: + The collect report. + """ + + +def pytest_deselected(items: Sequence["Item"]) -> None: + """Called for deselected test items, e.g. by keyword. + + May be called multiple times. + + :param items: + The items. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]": + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The collector. 
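+
+    A minimal sketch of a wrapper implementation (the ``hookwrapper``
+    pattern shown here is the usual way to post-process the report; the
+    body is illustrative):
+
+    .. code-block:: python
+
+        import pytest
+
+        @pytest.hookimpl(hookwrapper=True)
+        def pytest_make_collect_report(collector):
+            outcome = yield
+            report = outcome.get_result()
+            # inspect or rewrite the CollectReport here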
+ """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_pycollect_makemodule( + module_path: Path, path: "LEGACY_PATH", parent +) -> Optional["Module"]: + """Return a :class:`pytest.Module` collector or None for the given path. + + This hook will be called for each matching test module path. + The :hook:`pytest_collect_file` hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param module_path: The path of the module to collect. + :param path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. + """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Union["Module", "Class"], name: str, obj: object +) -> Union[None, "Item", "Collector", List[Union["Item", "Collector"]]]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The module/class collector. + :param name: + The name of the object in the module/class. + :param obj: + The object. + :returns: + The created items/collectors. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pyfuncitem: + The function item. + """ + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + """Generate (multiple) parametrized calls to a test function. + + :param metafunc: + The :class:`~pytest.Metafunc` helper for the test function. + """ + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id( + config: "Config", val: object, argname: str +) -> Optional[str]: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :param val: The parametrized value. + :param str argname: The automatic parameter name produced by pytest. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: "Session") -> Optional[object]: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. 
+ The return value is not used, but only stops further processing. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol( + item: "Item", nextitem: "Optional[Item]" +) -> Optional[object]: + """Perform the runtest protocol for a single test item. + + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + """ + + +def pytest_runtest_logstart( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: Tuple[str, Optional[int], str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + """ + + +def pytest_runtest_setup(item: "Item") -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + + :param item: + The item. + """ + + +def pytest_runtest_call(item: "Item") -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + + :param item: + The item. + """ + + +def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). 
This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param item: + The item. + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport( + item: "Item", call: "CallInfo[None]" +) -> Optional["TestReport"]: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param item: The item. + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_runtest_logreport(report: "TestReport") -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: "Config", + report: Union["CollectReport", "TestReport"], +) -> Optional[Dict[str, Any]]: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON. + + :param config: The pytest config object. + :param report: The report. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: "Config", + data: Dict[str, Any], +) -> Optional[Union["CollectReport", "TestReport"]]: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`. + + :param config: The pytest config object. + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> Optional[object]: + """Perform fixture setup execution. + + :param fixturdef: + The fixture definition object. + :param request: + The fixture request object. + :returns: + The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: "FixtureDef[Any]", request: "SubRequest" +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``). + + :param fixturdef: + The fixture definition object. + :param request: + The fixture request object. + """ + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: "Session") -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param session: The pytest session object. 
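+
+    A minimal sketch of a ``conftest.py`` implementation (the printed
+    message is illustrative):
+
+    .. code-block:: python
+
+        def pytest_sessionstart(session):
+            print(f"session starting under rootdir {session.config.rootpath}")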
+ """ + + +def pytest_sessionfinish( + session: "Session", + exitstatus: Union[int, "ExitCode"], +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param session: The pytest session object. + :param exitstatus: The status which pytest will return to the system. + """ + + +def pytest_unconfigure(config: "Config") -> None: + """Called before test process is exited. + + :param config: The pytest config object. + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: "Config", op: str, left: object, right: object +) -> Optional[List[str]]: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param config: The pytest config object. + :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`. + :param left: The left operand. + :param right: The right operand. + """ + + +def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param item: pytest item object of current test. + :param lineno: Line number of the assert statement. + :param orig: String with the original assertion. + :param expl: String with the assert explanation. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +def pytest_report_header( # type:ignore[empty-body] + config: "Config", start_path: Path, startdir: "LEGACY_PATH" +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: The pytest config object. + :param start_path: The starting dir. + :param startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. 
+ """ + + +def pytest_report_collectionfinish( # type:ignore[empty-body] + config: "Config", + start_path: Path, + startdir: "LEGACY_PATH", + items: Sequence["Item"], +) -> Union[str, List[str]]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param config: The pytest config object. + :param start_path: The starting dir. + :param startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( # type:ignore[empty-body] + report: Union["CollectReport", "TestReport"], config: "Config" +) -> "TestShortLogReport | Tuple[str, str, Union[str, Tuple[str, Mapping[str, bool]]]]": + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + :returns: The test status. + + Stops at first non-None result, see :ref:`firstresult`. + """ + + +def pytest_terminal_summary( + terminalreporter: "TerminalReporter", + exitstatus: "ExitCode", + config: "Config", +) -> None: + """Add a section to terminal summary reporting. + + :param terminalreporter: The internal terminal reporter object. + :param exitstatus: The exit status that will be reported back to the OS. + :param config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. + """ + + +@hookspec(historic=True) +def pytest_warning_recorded( + warning_message: "warnings.WarningMessage", + when: "Literal['config', 'collect', 'runtest']", + nodeid: str, + location: Optional[Tuple[str, int, str]], +) -> None: + """Process a warning captured by the internal pytest warnings plugin. + + :param warning_message: + The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains + the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param nodeid: + Full id of the item. + + :param location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). 
``function`` evaluates to + when the execution context is at the module level. + + .. versionadded:: 6.0 + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing skipping +# ------------------------------------------------------------------------- + + +def pytest_markeval_namespace( # type:ignore[empty-body] + config: "Config", +) -> Dict[str, Any]: + """Called when constructing the globals dictionary used for + evaluating string conditions in xfail/skipif markers. + + This is useful when the condition for a marker requires + objects that are expensive or impossible to obtain during + collection time, which is required by normal boolean + conditions. + + .. versionadded:: 6.2 + + :param config: The pytest config object. + :returns: A dictionary of additional globals to add. + """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror( + excrepr: "ExceptionRepr", + excinfo: "ExceptionInfo[BaseException]", +) -> Optional[bool]: + """Called for internal errors. + + Return True to suppress the fallback handling of printing an + INTERNALERROR message directly to sys.stderr. + + :param excrepr: The exception repr object. + :param excinfo: The exception info. + """ + + +def pytest_keyboard_interrupt( + excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]", +) -> None: + """Called for keyboard interrupt. + + :param excinfo: The exception info. + """ + + +def pytest_exception_interact( + node: Union["Item", "Collector"], + call: "CallInfo[Any]", + report: Union["CollectReport", "TestReport"], +) -> None: + """Called when an exception was raised which can potentially be + interactively handled. + + May be called during collection (see :hook:`pytest_make_collect_report`), + in which case ``report`` is a :class:`CollectReport`. + + May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), + in which case ``report`` is a :class:`TestReport`. + + This hook is not called if the exception that was raised is an internal + exception like ``skip.Exception``. + + :param node: + The item or collector. + :param call: + The call information. Contains the exception. + :param report: + The collection or test report. + """ + + +def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called upon pdb.set_trace(). + + Can be used by plugins to take special action just before the python + debugger enters interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + """ + + +def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None: + """Called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + """ diff --git a/venv/lib/python3.12/site-packages/_pytest/junitxml.py b/venv/lib/python3.12/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000000..9ee35b84e8 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/junitxml.py @@ -0,0 +1,700 @@ +"""Report test results in JUnit-XML format, for use with Jenkins and build +integration servers. + +Based on initial code from Ross Lawley. 
+
+Output conforms to
+https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+"""
+import functools
+import os
+import platform
+import re
+import xml.etree.ElementTree as ET
+from datetime import datetime
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Match
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import pytest
+from _pytest import nodes
+from _pytest import timing
+from _pytest._code.code import ExceptionRepr
+from _pytest._code.code import ReprFileLocation
+from _pytest.config import Config
+from _pytest.config import filename_arg
+from _pytest.config.argparsing import Parser
+from _pytest.fixtures import FixtureRequest
+from _pytest.reports import TestReport
+from _pytest.stash import StashKey
+from _pytest.terminal import TerminalReporter
+
+
+xml_key = StashKey["LogXML"]()
+
+
+def bin_xml_escape(arg: object) -> str:
+    r"""Visually escape invalid XML characters.
+
+    For example, transforms
+        'hello\aworld\b'
+    into
+        'hello#x07world#x08'
+    Note that the #xABs are *not* XML escapes - missing the ampersand &#xAB.
+    The idea is to escape visually for the user rather than for XML itself.
+    """
+
+    def repl(matchobj: Match[str]) -> str:
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return "#x%02X" % i
+        else:
+            return "#x%04X" % i
+
+    # The spec range of valid chars is:
+    # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
+    # For an unknown(?) reason, we disallow #x7F (DEL) as well.
+    illegal_xml_re = (
+        "[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]"
+    )
+    return re.sub(illegal_xml_re, repl, str(arg))
+
+
+def merge_family(left, right) -> None:
+    result = {}
+    for kl, vl in left.items():
+        for kr, vr in right.items():
+            if not isinstance(vl, list):
+                raise TypeError(type(vl))
+            result[kl] = vl + vr
+    left.update(result)
+
+
+families = {}
+families["_base"] = {"testcase": ["classname", "name"]}
+families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
+
+# xUnit 1.x inherits legacy attributes.
+families["xunit1"] = families["_base"].copy()
+merge_family(families["xunit1"], families["_base_legacy"])
+
+# xUnit 2.x uses strict base attributes.
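+# Note that "xunit2" aliases "_base" directly (no .copy()), so both keys
+# refer to the same dict; the "legacy" convenience name is rewritten to
+# "xunit1" in LogXML.__init__ below.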
+families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: List[Tuple[str, str]] = [] + self.nodes: List[ET.Element] = [] + self.attrs: Dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: Dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here. 
+ temp_attrs = {} + for key in self.attrs.keys(): + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration) + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: Optional[ReprFileLocation] = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", bin_xml_escape(msg), str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", 
type="pytest.xfail", message=xfailreason) + self.append(skipped) + else: + assert isinstance(report.longrepr, tuple) + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = f"{filename}:{lineno}: {skipreason}" + + skipped = ET.Element("skipped", type="pytest.skip", message=skipreason) + skipped.text = bin_xml_escape(details) + self.append(skipped) + self.write_captured_output(report) + + def finalize(self) -> None: + data = self.to_xml() + self.__dict__.clear() + # Type ignored because mypy doesn't like overriding a method. + # Also the return value doesn't match... + self.to_xml = lambda: data # type: ignore[assignment] + + +def _warn_incompatibility_with_xunit2( + request: FixtureRequest, fixture_name: str +) -> None: + """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" + from _pytest.warning_types import PytestWarning + + xml = request.config.stash.get(xml_key, None) + if xml is not None and xml.family not in ("xunit1", "legacy"): + request.node.warn( + PytestWarning( + "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( + fixture_name=fixture_name, family=xml.family + ) + ) + ) + + +@pytest.fixture +def record_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra properties to the calling test. + + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + + The fixture is callable with ``name, value``. The value is automatically + XML-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + _warn_incompatibility_with_xunit2(request, "record_property") + + def append_property(name: str, value: object) -> None: + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra xml attributes to the tag for the calling test. + + The fixture is callable with ``name, value``. The value is + automatically XML-encoded. + """ + from _pytest.warning_types import PytestExperimentalApiWarning + + request.node.warn( + PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") + ) + + _warn_incompatibility_with_xunit2(request, "record_xml_attribute") + + # Declare noop + def add_attr_noop(name: str, value: object) -> None: + pass + + attr_func = add_attr_noop + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def _check_record_param_type(param: str, v: str) -> None: + """Used by record_testsuite_property to check that the given parameter name is of the proper + type.""" + __tracebackhide__ = True + if not isinstance(v, str): + msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable] + raise TypeError(msg.format(param=param, g=type(v).__name__)) + + +@pytest.fixture(scope="session") +def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Record a new ```` tag as child of the root ````. + + This is suitable to writing global information regarding the entire test + suite, and is compatible with ``xunit2`` JUnit family. + + This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: + + .. 
code-block:: python + + def test_foo(record_testsuite_property): + record_testsuite_property("ARCH", "PPC") + record_testsuite_property("STORAGE_TYPE", "CEPH") + + :param name: + The property name. + :param value: + The property value. Will be converted to a string. + + .. warning:: + + Currently this fixture **does not work** with the + `pytest-xdist `__ plugin. See + :issue:`7767` for details. + """ + + __tracebackhide__ = True + + def record_func(name: str, value: object) -> None: + """No-op function in case --junit-xml was not passed in the command-line.""" + __tracebackhide__ = True + _check_record_param_type("name", name) + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + record_func = xml.add_global_property # noqa + return record_func + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="Create junit-xml style report file at given path", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="Prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|log|system-out|system-err|out-err|all", + default="no", + ) + parser.addini( + "junit_log_passing_tests", + "Capture log information for passing tests to JUnit report: ", + type="bool", + default=True, + ) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit2", + ) + + +def pytest_configure(config: Config) -> None: + xmlpath = config.option.xmlpath + # Prevent opening xmllog on worker nodes (xdist). + if xmlpath and not hasattr(config, "workerinput"): + junit_family = config.getini("junit_family") + config.stash[xml_key] = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + junit_family, + config.getini("junit_log_passing_tests"), + ) + config.pluginmanager.register(config.stash[xml_key]) + + +def pytest_unconfigure(config: Config) -> None: + xml = config.stash.get(xml_key, None) + if xml: + del config.stash[xml_key] + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address: str) -> List[str]: + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + # Convert file path to dotted path. + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = re.sub(r"\.py$", "", names[0]) + # Put any params back. 
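+    # e.g. "tests/test_x.py::TestC::test_p[1-2]"
+    #   -> ["tests.test_x", "TestC", "test_p[1-2]"]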
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: Optional[str], + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: Dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: Dict[ + Tuple[Union[str, TestReport], object], _NodeReporter + ] = {} + self.node_reporters_ordered: List[_NodeReporter] = [] + self.global_properties: List[Tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: List[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter: + nodeid: Union[str, TestReport] = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. 
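+                # A test that failed in its call phase and then errors in
+                # teardown needs two <testcase> entries to satisfy the JUnit
+                # schema; `close_report` finds the still-open call report.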
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (
+                        rep
+                        for rep in self.open_reports
+                        if (
+                            rep.nodeid == report.nodeid
+                            and getattr(rep, "item_index", None) == report_ii
+                            and getattr(rep, "worker_id", None) == report_wid
+                        )
+                    ),
+                    None,
+                )
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema.
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
+            reporter = self._opentestcase(report)
+            if report.when == "call":
+                reporter.append_failure(report)
+                self.open_reports.append(report)
+                if not self.log_passing_tests:
+                    reporter.write_captured_output(report)
+            else:
+                reporter.append_error(report)
+        elif report.skipped:
+            reporter = self._opentestcase(report)
+            reporter.append_skipped(report)
+        self.update_testcase_duration(report)
+        if report.when == "teardown":
+            reporter = self._opentestcase(report)
+            reporter.write_captured_output(report)
+
+            self.finalize(report)
+            report_wid = getattr(report, "worker_id", None)
+            report_ii = getattr(report, "item_index", None)
+            close_report = next(
+                (
+                    rep
+                    for rep in self.open_reports
+                    if (
+                        rep.nodeid == report.nodeid
+                        and getattr(rep, "item_index", None) == report_ii
+                        and getattr(rep, "worker_id", None) == report_wid
+                    )
+                ),
+                None,
+            )
+            if close_report:
+                self.open_reports.remove(close_report)
+
+    def update_testcase_duration(self, report: TestReport) -> None:
+        """Accumulate total duration for nodeid from given report and update
+        the Junit.testcase with the new total if already created."""
+        if self.report_duration == "total" or report.when == self.report_duration:
+            reporter = self.node_reporter(report)
+            reporter.duration += getattr(report, "duration", 0.0)
+
+    def pytest_collectreport(self, report: TestReport) -> None:
+        if not report.passed:
+            reporter = self._opentestcase(report)
+            if report.failed:
+                reporter.append_collect_error(report)
+            else:
+                reporter.append_collect_skipped(report)
+
+    def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
+        reporter = self.node_reporter("internal")
+        reporter.attrs.update(classname="pytest", name="internal")
+        reporter._add_simple("error", "internal error", str(excrepr))
+
+    def pytest_sessionstart(self) -> None:
+        self.suite_start_time = timing.time()
+
+    def pytest_sessionfinish(self) -> None:
+        dirname = os.path.dirname(os.path.abspath(self.logfile))
+        # exist_ok avoids filesystem race conditions between checking path existence and requesting creation
+        os.makedirs(dirname, exist_ok=True)
+
+        with open(self.logfile, "w", encoding="utf-8") as logfile:
+            suite_stop_time = timing.time()
+            suite_time_delta = suite_stop_time - self.suite_start_time
+
+            numtests = (
+                self.stats["passed"]
+                + self.stats["failure"]
+                + self.stats["skipped"]
+                + self.stats["error"]
+                - self.cnt_double_fail_tests
+            )
+            logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
+            suite_node = ET.Element(
+                "testsuite",
+                name=self.suite_name,
+                errors=str(self.stats["error"]),
+                failures=str(self.stats["failure"]),
+                skipped=str(self.stats["skipped"]),
+                tests=str(numtests),
+                time="%.3f" % suite_time_delta,
+                timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
+                hostname=platform.node(),
+            )
+            global_properties = self._get_global_properties_node()
+            if global_properties is not None:
+                suite_node.append(global_properties)
+            for node_reporter in self.node_reporters_ordered:
+                suite_node.append(node_reporter.to_xml())
+            testsuites =
ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name, bin_xml_escape(value))) + + def _get_global_properties_node(self) -> Optional[ET.Element]: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/venv/lib/python3.12/site-packages/_pytest/legacypath.py b/venv/lib/python3.12/site-packages/_pytest/legacypath.py new file mode 100644 index 0000000000..af1d0c07e3 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/legacypath.py @@ -0,0 +1,479 @@ +"""Add backward compatibility support for the legacy py path type.""" +import dataclasses +import shlex +import subprocess +from pathlib import Path +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import final +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + +if TYPE_CHECKING: + from typing_extensions import Final + + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
+ """ + + __test__ = False + + CLOSE_STDIN: "Final" = Pytester.CLOSE_STDIN + TimeoutExpired: "Final" = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + """See :meth:`Pytester._finalize`.""" + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Optional[Union[Item, Collector]]: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: 
+        """See :meth:`Pytester.genitems`."""
+        return self._pytester.genitems(colitems)
+
+    def runitem(self, source):
+        """See :meth:`Pytester.runitem`."""
+        return self._pytester.runitem(source)
+
+    def inline_runsource(self, source, *cmdlineargs):
+        """See :meth:`Pytester.inline_runsource`."""
+        return self._pytester.inline_runsource(source, *cmdlineargs)
+
+    def inline_genitems(self, *args):
+        """See :meth:`Pytester.inline_genitems`."""
+        return self._pytester.inline_genitems(*args)
+
+    def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False):
+        """See :meth:`Pytester.inline_run`."""
+        return self._pytester.inline_run(
+            *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc
+        )
+
+    def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest_inprocess`."""
+        return self._pytester.runpytest_inprocess(*args, **kwargs)
+
+    def runpytest(self, *args, **kwargs) -> RunResult:
+        """See :meth:`Pytester.runpytest`."""
+        return self._pytester.runpytest(*args, **kwargs)
+
+    def parseconfig(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfig`."""
+        return self._pytester.parseconfig(*args)
+
+    def parseconfigure(self, *args) -> Config:
+        """See :meth:`Pytester.parseconfigure`."""
+        return self._pytester.parseconfigure(*args)
+
+    def getitem(self, source, funcname="test_func"):
+        """See :meth:`Pytester.getitem`."""
+        return self._pytester.getitem(source, funcname)
+
+    def getitems(self, source):
+        """See :meth:`Pytester.getitems`."""
+        return self._pytester.getitems(source)
+
+    def getmodulecol(self, source, configargs=(), withinit=False):
+        """See :meth:`Pytester.getmodulecol`."""
+        return self._pytester.getmodulecol(
+            source, configargs=configargs, withinit=withinit
+        )
+
+    def collect_by_name(
+        self, modcol: Collector, name: str
+    ) -> Optional[Union[Item, Collector]]:
+        """See :meth:`Pytester.collect_by_name`."""
+        return self._pytester.collect_by_name(modcol, name)
+
+    def popen(
+        self,
+        cmdargs,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        stdin=CLOSE_STDIN,
+        **kw,
+    ):
+        """See :meth:`Pytester.popen`."""
+        return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw)
+
+    def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult:
+        """See :meth:`Pytester.run`."""
+        return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin)
+
+    def runpython(self, script) -> RunResult:
+        """See :meth:`Pytester.runpython`."""
+        return self._pytester.runpython(script)
+
+    def runpython_c(self, command):
+        """See :meth:`Pytester.runpython_c`."""
+        return self._pytester.runpython_c(command)
+
+    def runpytest_subprocess(self, *args, timeout=None) -> RunResult:
+        """See :meth:`Pytester.runpytest_subprocess`."""
+        return self._pytester.runpytest_subprocess(*args, timeout=timeout)
+
+    def spawn_pytest(
+        self, string: str, expect_timeout: float = 10.0
+    ) -> "pexpect.spawn":
+        """See :meth:`Pytester.spawn_pytest`."""
+        return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout)
+
+    def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
+        """See :meth:`Pytester.spawn`."""
+        return self._pytester.spawn(cmd, expect_timeout=expect_timeout)
+
+    def __repr__(self) -> str:
+        return f"<Testdir {self.tmpdir!r}>"
+
+    def __str__(self) -> str:
+        return str(self.tmpdir)
+
+
+class LegacyTestdirPlugin:
+    @staticmethod
+    @fixture
+    def testdir(pytester: Pytester) -> Testdir:
+        """
+        Identical to :fixture:`pytester`, and provides an instance whose methods return
+        legacy ``LEGACY_PATH`` objects instead when applicable.
+
+        New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`.
+        """
+        return Testdir(pytester, _ispytest=True)
+
+
+@final
+@dataclasses.dataclass
+class TempdirFactory:
+    """Backward compatibility wrapper that implements :class:`py.path.local`
+    for :class:`TempPathFactory`.
+
+    .. note::
+        These days, it is preferred to use ``tmp_path_factory``.
+
+        :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+    """
+
+    _tmppath_factory: TempPathFactory
+
+    def __init__(
+        self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._tmppath_factory = tmppath_factory
+
+    def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a :class:`py.path.local` object."""
+        return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve())
+
+    def getbasetemp(self) -> LEGACY_PATH:
+        """Same as :meth:`TempPathFactory.getbasetemp`, but returns a :class:`py.path.local` object."""
+        return legacy_path(self._tmppath_factory.getbasetemp().resolve())
+
+
+class LegacyTmpdirPlugin:
+    @staticmethod
+    @fixture(scope="session")
+    def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
+        """Return a :class:`pytest.TempdirFactory` instance for the test session."""
+        # Set dynamically by pytest_configure().
+        return request.config._tmpdirhandler  # type: ignore
+
+    @staticmethod
+    @fixture
+    def tmpdir(tmp_path: Path) -> LEGACY_PATH:
+        """Return a temporary directory path object which is unique to each test
+        function invocation, created as a sub directory of the base temporary
+        directory.
+
+        By default, a new base temporary directory is created each test session,
+        and old bases are removed after 3 sessions, to aid in debugging. If
+        ``--basetemp`` is used then it is cleared each session. See :ref:`base
+        temporary directory`.
+
+        The returned object is a `legacy_path`_ object.
+
+        .. note::
+            These days, it is preferred to use ``tmp_path``.
+
+            :ref:`About the tmpdir and tmpdir_factory fixtures<tmpdir and tmpdir_factory>`.
+
+        .. _legacy_path: https://py.readthedocs.io/en/latest/path.html
+        """
+        return legacy_path(tmp_path)
+
+
+def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH:
+    """Return a directory path object with the given name.
+
+    Same as :func:`mkdir`, but returns a legacy py path instance.
+    """
+    return legacy_path(self.mkdir(name))
+
+
+def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH:
+    """(deprecated) The file system path of the test module which collected this test."""
+    return legacy_path(self.path)
+
+
+def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use ``startpath`` which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(self.startpath)
+
+
+def Config_invocation_dir(self: Config) -> LEGACY_PATH:
+    """The directory from which pytest was invoked.
+
+    Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`,
+    which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.invocation_params.dir))
+
+
+def Config_rootdir(self: Config) -> LEGACY_PATH:
+    """The path to the :ref:`rootdir <rootdir>`.
+
+    Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`.
+
+    :type: LEGACY_PATH
+    """
+    return legacy_path(str(self.rootpath))
+
+
+def Config_inifile(self: Config) -> Optional[LEGACY_PATH]:
+    """The path to the :ref:`configfile <configfiles>`.
+ + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`. + + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_stardir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type( + self, name: str, type: str, value: Union[str, List[str]] +): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_stardir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
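+    # Checking on every registration makes the legacy testdir fixture appear
+    # as soon as pytester itself is registered, however late that happens.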
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/venv/lib/python3.12/site-packages/_pytest/logging.py b/venv/lib/python3.12/site-packages/_pytest/logging.py new file mode 100644 index 0000000000..9f2f1c7935 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/logging.py @@ -0,0 +1,920 @@ +"""Access and control log capturing.""" +import io +import logging +import os +import re +from contextlib import contextmanager +from contextlib import nullcontext +from datetime import datetime +from datetime import timedelta +from datetime import timezone +from io import StringIO +from logging import LogRecord +from pathlib import Path +from typing import AbstractSet +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.compat import final +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] + + from typing_extensions import Literal +else: + logging_StreamHandler = logging.StreamHandler + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class DatetimeFormatter(logging.Formatter): + """A logging formatter which formats record with + :func:`datetime.datetime.strftime` formatter instead of + :func:`time.strftime` in case of microseconds in format string. 
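+
+    For example, ``datefmt="%H:%M:%S.%f"`` is rendered through
+    :func:`datetime.datetime.strftime`, which understands ``%f``, while a plain
+    ``"%H:%M:%S"`` keeps the stock :meth:`logging.Formatter.formatTime` path.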
+ """ + + def formatTime(self, record: LogRecord, datefmt=None) -> str: + if datefmt and "%f" in datefmt: + ct = self.converter(record.created) + tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) + # Construct `datetime.datetime` object from `struct_time` + # and msecs information from `record` + dt = datetime(*ct[0:6], microsecond=round(record.msecs * 1000), tzinfo=tz) + return dt.strftime(datefmt) + # Use `logging.Formatter` for non-microsecond formats + return super().formatTime(record, datefmt) + + +class ColoredLevelFormatter(DatetimeFormatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: Dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. + """ + + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: Union[int, str, bool, None]) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. 
+ + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. + + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined] + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. + indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="Default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "Level of messages to catch/display." + " Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' 
+ ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='Enable log display during test run (also known as "live logging")', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="Path to a file when logging will be written to", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="Log file logging level", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + group.addoption( + "--log-disable", + action="append", + default=[], + dest="logger_disable", + help="Disable a logger by name. Can be passed multiple times.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons. +class catching_logs: + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: Optional[int] = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self): + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__(self, type, value, traceback): + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: List[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def clear(self) -> None: + self.records.clear() + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. 
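+            # Swallowing the error that way would hide a real bug in the code
+            # under test or in the suite itself;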
+ # pytest wants to make such mistakes visible during testing. + raise + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: Optional[int] = None + # Dict of log name -> log level. + self._initial_logger_levels: Dict[Optional[str], int] = {} + self._initial_disabled_logging_level: Optional[int] = None + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels and the disabled logging levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + # Disable logging at the original disabled logging level. + if self._initial_disabled_logging_level is not None: + logging.disable(self._initial_disabled_logging_level) + self._initial_disabled_logging_level = None + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture.""" + return self._item.stash[caplog_handler_key] + + def get_records( + self, when: "Literal['setup', 'call', 'teardown']" + ) -> List[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param when: + Which test phase to obtain the records from. + Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + + .. versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> List[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> List[Tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> List[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.clear() + + def _force_enable_logging( + self, level: Union[int, str], logger_obj: logging.Logger + ) -> int: + """Enable the desired logging level if the global level was disabled via ``logging.disabled``. + + Only enables logging levels greater than or equal to the requested ``level``. + + Does nothing if the desired ``level`` wasn't disabled. + + :param level: + The logger level caplog should capture. 
+ All logging is enabled if a non-standard logging level string is supplied. + Valid level strings are in :data:`logging._nameToLevel`. + :param logger_obj: The logger object to check. + + :return: The original disabled logging level. + """ + original_disable_level: int = logger_obj.manager.disable # type: ignore[attr-defined] + + if isinstance(level, str): + # Try to translate the level string to an int for `logging.disable()` + level = logging.getLevelName(level) + + if not isinstance(level, int): + # The level provided was not valid, so just un-disable all logging. + logging.disable(logging.NOTSET) + elif not logger_obj.isEnabledFor(level): + # Each level is `10` away from other levels. + # https://docs.python.org/3/library/logging.html#logging-levels + disable_level = max(level - 10, logging.NOTSET) + logging.disable(disable_level) + + return original_disable_level + + def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None: + """Set the threshold level of a logger for the duration of a test. + + Logging messages which are less severe than this level will not be captured. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + Will enable the requested logging level if it was disabled via :meth:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. + self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) + if self._initial_disabled_logging_level is None: + self._initial_disabled_logging_level = initial_disabled_logging_level + + @contextmanager + def at_level( + self, level: Union[int, str], logger: Optional[str] = None + ) -> Generator[None, None, None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + Will enable the requested logging level if it was disabled via :meth:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + original_disable_level = self._force_enable_logging(level, logger_obj) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + logging.disable(original_disable_level) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture, None, None]: + """Access and control log capturing. 
+ + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + "'{}' is not recognized as a logging level name for " + "'{}'. Please consider passing the " + "logging level num instead.".format(log_level, setting_name) + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. + + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting(config, "log_file_level") + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_handler = _FileHandler(log_file, mode="w", encoding="UTF-8") + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = DatetimeFormatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + # Guaranteed by `_log_cli_enabled()`. + assert terminal_reporter is not None + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. 
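+            # (_LiveLoggingStreamHandler accepts capture_manager=None and then
+            # wraps each emit in a nullcontext instead of pausing capture).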
+ self.log_cli_handler: Union[ + _LiveLoggingStreamHandler, _LiveLoggingNullHandler + ] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + self._disable_loggers(loggers_to_disable=config.option.logger_disable) + + def _disable_loggers(self, loggers_to_disable: List[str]) -> None: + if not loggers_to_disable: + return + + for name in loggers_to_disable: + logger = logging.getLogger(name) + logger.disabled = True + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = DatetimeFormatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. + """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode="w", encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self): + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]: + if session.config.option.collectonly: + yield + return + + if self._log_cli_enabled() and self._config.getoption("verbose") < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield # Run all the tests. 
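+
+    # The logstart/logreport/logfinish hooks below only advance the live-log
+    # handler's phase marker; the records themselves are captured in
+    # _runtest_for() under the catching_logs context managers.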
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler: + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + yield + + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("setup") + + empty: Dict[str, List[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + yield from self._runtest_for(item, "setup") + + @hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("call") + + yield from self._runtest_for(item, "call") + + @hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None, None, None]: + self.log_cli_handler.set_when("teardown") + + yield from self._runtest_for(item, "teardown") + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(hookwrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None, None, None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + yield + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
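+    # (TerminalReporter does provide the write()/flush() pair that
+    # StreamHandler.emit() actually uses.)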
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: Optional[CaptureManager], + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: Optional[str]) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/venv/lib/python3.12/site-packages/_pytest/main.py b/venv/lib/python3.12/site-packages/_pytest/main.py new file mode 100644 index 0000000000..ea89a63fa1 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/main.py @@ -0,0 +1,913 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" +import argparse +import dataclasses +import fnmatch +import functools +import importlib +import os +import sys +from pathlib import Path +from typing import Callable +from typing import Dict +from typing import FrozenSet +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +from _pytest import nodes +from _pytest.compat import final +from _pytest.compat import overload +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureManager +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import safe_exists +from _pytest.pathlib import visit +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState + + +if TYPE_CHECKING: + from typing_extensions import Literal + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + 
"norecursedirs", + "Directory patterns to avoid for recursion", + type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "Directories to search for tests when no files or directories are given on the " + "command line", + type="args", + default=[], + ) + group = parser.getgroup("general", "Running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="Exit instantly on first error or failed test", + ) + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="Set which warnings to report, see -W option of Python itself", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="Exit after first num failures or errors", + ) + group._addoption( + "--strict-config", + action="store_true", + help="Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + ) + group._addoption( + "--strict-markers", + action="store_true", + help="Markers not registered in the `markers` section of the configuration " + "file raise errors", + ) + group._addoption( + "--strict", + action="store_true", + help="(Deprecated) alias to --strict-markers", + ) + group._addoption( + "-c", + "--config-file", + metavar="FILE", + type=str, + dest="inifilename", + help="Load configuration from `FILE` instead of trying to locate one of the " + "implicit configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="Only collect tests, don't execute them", + ) + group.addoption( + "--pyargs", + action="store_true", + help="Try to interpret all arguments as Python packages", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="Ignore path during collection (multi-allowed)", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="Ignore path pattern during collection (multi-allowed)", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="Deselect item (via node id prefix) during collection (multi-allowed)", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="Only load conftest.py's relative to specified dir", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="Prepend/append to sys.path when importing test modules and conftest " + "files. Default: prepend.", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "Base temporary directory for this test run. 
" + "(Warning: this directory is removed if it exists.)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]] +) -> Union[int, ExitCode]: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. 
+ excinfo = None # type: ignore + os.chdir(session.startpath) + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]: + return wrap_session(config, _main) + + +def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: "Session") -> None: + session.perform_collect() + + +def pytest_runtestloop(session: "Session") -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + "%d error%s during collection" + % (session.testsfailed, "s" if session.testsfailed != 1 else "") + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: Path) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script.""" + bindir = path.joinpath("Scripts" if sys.platform.startswith("win") else "bin") + try: + if not bindir.is_dir(): + return False + except OSError: + return False + activates = ( + "activate", + "activate.csh", + "activate.fish", + "Activate", + "Activate.bat", + "Activate.ps1", + ) + return any(fname.name in activates for fname in bindir.iterdir()) + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]: + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent, rootpath=config.rootpath + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + + if collection_path.is_dir(): + norecursepatterns = config.getini("norecursedirs") + if any(fnmatch_ex(pat, collection_path) for pat in norecursepatterns): + return True + + return None + + +def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + 
return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__(self, pm: PytestPluginManager, remove_mods) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str): + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@dataclasses.dataclass +class _bestrelpath_cache(Dict[Path, str]): + __slots__ = ("path",) + + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Session(nodes.FSCollector): + """The root of the collection tree. + + ``Session`` collects the initial paths given as arguments to pytest. + """ + + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: Union[int, ExitCode] + + def __init__(self, config: Config) -> None: + super().__init__( + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self.shouldstop: Union[bool, str] = False + self.shouldfail: Union[bool, str] = False + self.trace = config.trace.root.get("collection") + self._initialpaths: FrozenSet[Path] = frozenset() + + self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> "Session": + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport( + self, report: Union[TestReport, CollectReport] + ) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + # Optimization: Path(Path(...)) is much slower than isinstance. 
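The deselection loop above leans on str.startswith accepting a tuple, so a single pass covers every --deselect prefix. The same matching in isolation (the node IDs are made up):

    deselect_prefixes = ("tests/test_a.py::test_slow", "tests/test_b.py")
    nodeids = [
        "tests/test_a.py::test_slow[1]",
        "tests/test_a.py::test_fast",
        "tests/test_b.py::test_x",
    ]
    remaining = [n for n in nodeids if not n.startswith(deselect_prefixes)]
    assert remaining == ["tests/test_a.py::test_fast"]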
+ path_ = path if isinstance(path, Path) else Path(path) + return path_ in self._initialpaths + + def gethookproxy(self, fspath: "os.PathLike[str]"): + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. + my_conftestmodules = pm._getconftestmodules( + path, + self.config.getoption("importmode"), + rootpath=self.config.rootpath, + ) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + if remove_mods: + # One or more conftests are not in use at this fspath. + from .config.compat import PathAwareHookProxy + + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _recurse(self, direntry: "os.DirEntry[str]") -> bool: + if direntry.name == "__pycache__": + return False + fspath = Path(direntry.path) + ihook = self.gethookproxy(fspath.parent) + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return False + return True + + def _collectfile( + self, fspath: Path, handle_dupes: bool = True + ) -> Sequence[nodes.Collector]: + assert ( + fspath.is_file() + ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( + fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() + ) + ihook = self.gethookproxy(fspath) + if not self.isinitpath(fspath): + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if fspath in duplicate_paths: + return () + else: + duplicate_paths.add(fspath) + + return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] + + @overload + def perform_collect( + self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ... + ) -> Sequence[nodes.Item]: + ... + + @overload + def perform_collect( # noqa: F811 + self, args: Optional[Sequence[str]] = ..., genitems: bool = ... + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + ... + + def perform_collect( # noqa: F811 + self, args: Optional[Sequence[str]] = None, genitems: bool = True + ) -> Sequence[Union[nodes.Item, nodes.Collector]]: + """Perform the collection phase for this session. + + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. 
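isinitpath and gethookproxy above both use the isinstance guard instead of calling Path(...) unconditionally because, as their comments note, Path(Path(...)) is much slower than the check. The guard on its own:

    from pathlib import Path

    def as_path(p):
        # Avoids the cost of Path(Path(...)) for values that are already Path objects.
        return p if isinstance(p, Path) else Path(p)

    assert as_path("a/b") == as_path(Path("a/b"))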
+ """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: List[Tuple[Path, List[str]]] = [] + self.items: List[nodes.Item] = [] + + hook = self.config.hook + + items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items + try: + initialpaths: List[Path] = [] + for arg in args: + fspath, parts = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append((fspath, parts)) + initialpaths.append(fspath) + self._initialpaths = frozenset(initialpaths) + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, collectors in self._notfound: + if collectors: + errors.append( + f"not found: {arg}\n(no name {arg!r} in any of {collectors!r})" + ) + else: + errors.append(f"found no collectors for {arg}") + + raise UsageError(*errors) + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + hook.pytest_collection_finish(session=self) + + self.testscollected = len(items) + return items + + def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]: + from _pytest.python import Package + + # Keep track of any collected nodes in here, so we don't duplicate fixtures. + node_cache1: Dict[Path, Sequence[nodes.Collector]] = {} + node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {} + + # Keep track of any collected collectors in matchnodes paths, so they + # are not collected more than once. + matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {} + + # Directories of pkgs with dunder-init files. + pkg_roots: Dict[Path, Package] = {} + + for argpath, names in self._initial_parts: + self.trace("processing argument", (argpath, names)) + self.trace.root.indent += 1 + + # Start with a Session root, and delve to argpath item (dir or file) + # and stack all Packages found on the way. + # No point in finding packages when collecting doctests. + if not self.config.getoption("doctestmodules", False): + pm = self.config.pluginmanager + for parent in (argpath, *argpath.parents): + if not pm._is_in_confcutdir(argpath): + break + + if parent.is_dir(): + pkginit = parent / "__init__.py" + if pkginit.is_file() and pkginit not in node_cache1: + col = self._collectfile(pkginit, handle_dupes=False) + if col: + if isinstance(col[0], Package): + pkg_roots[parent] = col[0] + node_cache1[col[0].path] = [col[0]] + + # If it's a directory argument, recurse and look for any Subpackages. + # Let the Package collector deal with subnodes, don't collect here. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + seen_dirs: Set[Path] = set() + for direntry in visit(argpath, self._recurse): + if not direntry.is_file(): + continue + + path = Path(direntry.path) + dirpath = path.parent + + if dirpath not in seen_dirs: + # Collect packages first. + seen_dirs.add(dirpath) + pkginit = dirpath / "__init__.py" + if pkginit.exists(): + for x in self._collectfile(pkginit): + yield x + if isinstance(x, Package): + pkg_roots[dirpath] = x + if dirpath in pkg_roots: + # Do not collect packages here. 
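As the perform_collect docstring above notes, collection runs as part of the default pytest_collection hook. The usual way to exercise just this phase from the outside is --collect-only; a small sketch (the tests/ path is illustrative):

    import pytest

    # Runs initialization plus the collection phase shown above;
    # pytest_runtestloop then returns early because of --collect-only.
    exit_code = pytest.main(["--collect-only", "-q", "tests/"])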
+ continue + + for x in self._collectfile(path): + key2 = (type(x), x.path) + if key2 in node_cache2: + yield node_cache2[key2] + else: + node_cache2[key2] = x + yield x + else: + assert argpath.is_file() + + if argpath in node_cache1: + col = node_cache1[argpath] + else: + collect_root = pkg_roots.get(argpath.parent, self) + col = collect_root._collectfile(argpath, handle_dupes=False) + if col: + node_cache1[argpath] = col + + matching = [] + work: List[ + Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]] + ] = [(col, names)] + while work: + self.trace("matchnodes", col, names) + self.trace.root.indent += 1 + + matchnodes, matchnames = work.pop() + for node in matchnodes: + if not matchnames: + matching.append(node) + continue + if not isinstance(node, nodes.Collector): + continue + key = (type(node), node.nodeid) + if key in matchnodes_cache: + rep = matchnodes_cache[key] + else: + rep = collect_one_node(node) + matchnodes_cache[key] = rep + if rep.passed: + submatchnodes = [] + for r in rep.result: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. + if ( + r.name == matchnames[0] + or r.name.split("[")[0] == matchnames[0] + ): + submatchnodes.append(r) + if submatchnodes: + work.append((submatchnodes, matchnames[1:])) + else: + # Report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134). + node.ihook.pytest_collectreport(report=rep) + + self.trace("matchnodes finished -> ", len(matching), "nodes") + self.trace.root.indent -= 1 + + if not matching: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, col)) + continue + + # If __init__.py was the only file requested, then the matched + # node will be the corresponding Package (by default), and the + # first yielded item will be the __init__ Module itself, so + # just use that. If this special case isn't taken, then all the + # files in the package will be yielded. + if argpath.name == "__init__.py" and isinstance(matching[0], Package): + try: + yield next(iter(matching[0].collect())) + except StopIteration: + # The package collects nothing with only an __init__.py + # file in it, which gets ignored by the default + # "python_files" option. 
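The parametrization workaround in the matching loop above accepts either the exact name or the name with its [...] id stripped, which is what lets a ::test_eval selection find test_eval[3+5-8]. The comparison in isolation (names invented):

    def matches(name: str, wanted: str) -> bool:
        return name == wanted or name.split("[")[0] == wanted

    assert matches("test_eval[3+5-8]", "test_eval")
    assert matches("test_eval", "test_eval")
    assert not matches("test_evaluate", "test_eval")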
+ pass + continue + + yield from matching + + self.trace.root.indent -= 1 + + def genitems( + self, node: Union[nodes.Item, nodes.Collector] + ) -> Iterator[nodes.Item]: + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + rep = collect_one_node(node) + if rep.passed: + for subnode in rep.result: + yield from self.genitems(subnode) + node.ihook.pytest_collectreport(report=rep) + + +def search_pypath(module_name: str) -> str: + """Search sys.path for the given a dotted module name, and return its file system path.""" + try: + spec = importlib.util.find_spec(module_name) + # AttributeError: looks like package module, but actually filename + # ImportError: module does not exist + # ValueError: not a module name + except (AttributeError, ImportError, ValueError): + return module_name + if spec is None or spec.origin is None or spec.origin == "namespace": + return module_name + elif spec.submodule_search_locations: + return os.path.dirname(spec.origin) + else: + return spec.origin + + +def resolve_collection_argument( + invocation_path: Path, arg: str, *, as_pypath: bool = False +) -> Tuple[Path, List[str]]: + """Parse path arguments optionally containing selection parts and return (fspath, names). + + Command-line arguments can point to files and/or directories, and optionally contain + parts for specific tests selection, for example: + + "pkg/tests/test_foo.py::TestClass::test_foo" + + This function ensures the path exists, and returns a tuple: + + (Path("/full/path/to/pkg/tests/test_foo.py"), ["TestClass", "test_foo"]) + + When as_pypath is True, expects that the command-line argument actually contains + module paths instead of file-system paths: + + "pkg.tests.test_foo::TestClass::test_foo" + + In which case we search sys.path for a matching module, and then return the *path* to the + found module. + + If the path doesn't exist, raise UsageError. + If the path is a directory and selection parts are present, raise UsageError. 
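search_pypath above distinguishes packages from plain modules via the module spec: packages carry submodule_search_locations, so their directory is returned, while a plain module maps to its origin file. The same probe made directly with the stdlib:

    import importlib.util
    import os

    spec = importlib.util.find_spec("json")          # a package
    assert spec.submodule_search_locations           # truthy -> dirname(spec.origin)
    print(os.path.dirname(spec.origin))

    spec = importlib.util.find_spec("json.decoder")  # a plain module
    assert not spec.submodule_search_locations       # None -> spec.origin itself
    print(spec.origin)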
+ """ + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" + if as_pypath: + strpath = search_pypath(strpath) + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not safe_exists(fspath): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return fspath, parts diff --git a/venv/lib/python3.12/site-packages/_pytest/mark/__init__.py b/venv/lib/python3.12/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000000..de46b4c8a7 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,269 @@ +"""Generic mechanism for marking and selecting python functions.""" +import dataclasses +from typing import AbstractSet +from typing import Collection +from typing import List +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from .expression import Expression +from .expression import ParseError +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Optional[Config]]() + + +def param( + *values: object, + marks: Union[MarkDecorator, Collection[Union[MarkDecorator, Mark]]] = (), + id: Optional[str] = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + :param marks: A single mark or a list of marks to be applied to this parameter set. + :param id: The id to attribute to this parameter set. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Only run tests which match the given substring expression. " + "An expression is a Python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. 
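The partition("[") and split("::") steps above keep a parametrization id attached to the last selection part, so the brackets never get split on "::". Traced on a concrete (invented) argument:

    arg = "tests/test_foo.py::TestClass::test_bar[case-1]"
    base, squacket, rest = arg.partition("[")
    strpath, *parts = base.split("::")
    if parts:
        parts[-1] = f"{parts[-1]}{squacket}{rest}"

    assert strpath == "tests/test_foo.py"
    assert parts == ["TestClass", "test_bar[case-1]"]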
" + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="Only run tests matching given mark expression. " + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "Markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@dataclasses.dataclass +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + __slots__ = ("_names",) + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: "Item") -> "KeywordMatcher": + mapped_names = set() + + # Add the names of the current item and any parent items. + import pytest + + for node in item.listchain(): + if not isinstance(node, pytest.Session): + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. + mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str) -> bool: + subname = subname.lower() + names = (name.lower() for name in self._names) + + for name in names: + if subname in name: + return True + return False + + +def deselect_by_keyword(items: "List[Item]", config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@dataclasses.dataclass +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. 
+ """ + + __slots__ = ("own_mark_names",) + + own_mark_names: AbstractSet[str] + + @classmethod + def from_item(cls, item: "Item") -> "MarkMatcher": + mark_names = {mark.name for mark in item.iter_markers()} + return cls(mark_names) + + def __call__(self, name: str) -> bool: + return name in self.own_mark_names + + +def deselect_by_mark(items: "List[Item]", config: Config) -> None: + matchexpr = config.option.markexpr + if not matchexpr: + return + + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: List[Item] = [] + deselected: List[Item] = [] + for item in items: + if expr.evaluate(MarkMatcher.from_item(item)): + remaining.append(item) + else: + deselected.append(item) + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except ParseError as e: + raise UsageError(f"{exc_message}: {expr}: {e}") from None + + +def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None: + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config: Config) -> None: + config.stash[old_mark_config_key] = MARK_GEN._config + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + "{!s} must be one of skip, xfail or fail_at_collect" + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) + ) + + +def pytest_unconfigure(config: Config) -> None: + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/venv/lib/python3.12/site-packages/_pytest/mark/expression.py b/venv/lib/python3.12/site-packages/_pytest/mark/expression.py new file mode 100644 index 0000000000..9287bcee50 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/mark/expression.py @@ -0,0 +1,228 @@ +r"""Evaluate match expressions, as used by `-k` and `-m`. + +The grammar is: + +expression: expr? EOF +expr: and_expr ('or' and_expr)* +and_expr: not_expr ('and' not_expr)* +not_expr: 'not' not_expr | '(' expr ')' | ident +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ + +The semantics are: + +- Empty expression evaluates to False. +- ident evaluates to True of False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. +""" +import ast +import dataclasses +import enum +import re +import sys +import types +from typing import Callable +from typing import Iterator +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import Sequence + +if sys.version_info >= (3, 8): + astNameConstant = ast.Constant +else: + astNameConstant = ast.NameConstant + + +__all__ = [ + "Expression", + "ParseError", +] + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + + +@dataclasses.dataclass(frozen=True) +class Token: + __slots__ = ("type", "value", "pos") + type: TokenType + value: str + pos: int + + +class ParseError(Exception): + """The expression contains invalid syntax. + + :param column: The column in the line where the error occurred (1-based). + :param message: A description of the error. 
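By contrast with -k, MarkMatcher above requires the exact marker name, so -m expressions never match on name fragments:

    own_mark_names = {"slow", "network"}

    def mark_match(name: str) -> bool:
        return name in own_mark_names    # exact match only

    assert mark_match("slow")
    assert not mark_match("slo")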
+ """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("tokens", "current") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, + f'unexpected character "{input[pos]}"', + ) + yield Token(TokenType.EOF, "", pos) + + def accept(self, type: TokenType, *, reject: bool = False) -> Optional[Token]: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> NoReturn: + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. +IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = astNameConstant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + return ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +class MatcherAdapter(Mapping[str, bool]): + """Adapts a matcher function to a locals mapping as required by eval().""" + + def __init__(self, matcher: Callable[[str], bool]) -> None: + self.matcher = matcher + + def __getitem__(self, key: str) -> bool: + return self.matcher(key[len(IDENT_PREFIX) :]) + + def __iter__(self) -> Iterator[str]: + raise NotImplementedError() + + def __len__(self) -> int: + raise NotImplementedError() + + +class Expression: + """A compiled match expression as used by -k and -m. + + The expression can be evaluated against different matchers. 
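Tying the scanner, parser, and MatcherAdapter above together: compile builds the code object once, evaluate runs it against any matcher callable, and syntax errors surface as ParseError with a 1-based column. A usage sketch against the vendored module in this diff:

    from _pytest.mark.expression import Expression, ParseError

    expr = Expression.compile("slow and not network")
    assert expr.evaluate(lambda name: name == "slow")
    assert not expr.evaluate(lambda name: name in ("slow", "network"))

    try:
        Expression.compile("slow and")   # missing right-hand side
    except ParseError as err:
        # at column 9: expected not OR left parenthesis OR identifier; got end of input
        print(err)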
+ """ + + __slots__ = ("code",) + + def __init__(self, code: types.CodeType) -> None: + self.code = code + + @classmethod + def compile(self, input: str) -> "Expression": + """Compile a match expression. + + :param input: The input expression - one line. + """ + astexpr = expression(Scanner(input)) + code: types.CodeType = compile( + astexpr, + filename="", + mode="eval", + ) + return Expression(code) + + def evaluate(self, matcher: Callable[[str], bool]) -> bool: + """Evaluate the match expression. + + :param matcher: + Given an identifier, should return whether it matches or not. + Should be prepared to handle arbitrary strings as input. + + :returns: Whether the expression matches or not. + """ + ret: bool = eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher)) + return ret diff --git a/venv/lib/python3.12/site-packages/_pytest/mark/structures.py b/venv/lib/python3.12/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000000..55620f0429 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/mark/structures.py @@ -0,0 +1,618 @@ +import collections.abc +import dataclasses +import inspect +import warnings +from typing import Any +from typing import Callable +from typing import Collection +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import Optional +from typing import overload +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .._code import getfslineno +from ..compat import ascii_escaped +from ..compat import final +from ..compat import NOTSET +from ..compat import NotSetType +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.outcomes import fail +from _pytest.warning_types import PytestUnknownMarkWarning + +if TYPE_CHECKING: + from ..nodes import Node + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def istestfunc(func) -> bool: + return callable(func) and getattr(func, "__name__", "") != "" + + +def get_empty_parameterset_mark( + config: Config, argnames: Sequence[str], func +) -> "MarkDecorator": + from ..nodes import Collector + + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip(reason=reason) + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet(NamedTuple): + values: Sequence[Union[object, NotSetType]] + marks: Collection[Union["MarkDecorator", "Mark"]] + id: Optional[str] + + @classmethod + def param( + cls, + *values: object, + marks: Union["MarkDecorator", Collection[Union["MarkDecorator", "Mark"]]] = (), + id: Optional[str] = None, + ) -> "ParameterSet": + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + + if id is not None: + if not isinstance(id, str): + raise TypeError(f"Expected id to be a 
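get_empty_parameterset_mark above handles a parametrize call whose value list turns out empty, governed by the empty_parameter_set_mark ini option ("skip" by default, or "xfail" / "fail_at_collect"). A test that triggers it:

    import pytest

    @pytest.mark.parametrize("x", [])   # empty set, e.g. computed at runtime
    def test_empty(x):
        assert False                    # never runs under the default "skip"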
string, got {type(id)}: {id!r}") + id = ascii_escaped(id) + return cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: Union["ParameterSet", Sequence[object], object], + force_tuple: bool = False, + ) -> "ParameterSet": + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. + """ + + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: Union[str, Sequence[str]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + *args, + **kwargs, + ) -> Tuple[Sequence[str], bool]: + if isinstance(argnames, str): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + force_tuple: bool, + ) -> List["ParameterSet"]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: Union[str, Sequence[str]], + argvalues: Iterable[Union["ParameterSet", Sequence[object], object]], + func, + config: Config, + nodeid: str, + ) -> Tuple[Sequence[str], List["ParameterSet"]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@final +@dataclasses.dataclass(frozen=True) +class Mark: + """A pytest mark.""" + + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: Tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Optional["Mark"] = dataclasses.field(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. 
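extract_from above is why single-name parametrizes set force_tuple: without it, a tuple value would be decomposed into several argument values. Checked against the vendored module in this diff:

    from _pytest.mark.structures import ParameterSet

    # One argname: the whole tuple is the single parameter value.
    assert ParameterSet.extract_from((1, 2), force_tuple=True).values == ((1, 2),)

    # Several argnames: the tuple supplies one value per name.
    assert ParameterSet.extract_from((1, 2), force_tuple=False).values == (1, 2)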
+ _param_ids_generated: Optional[Sequence[str]] = dataclasses.field( + default=None, repr=False + ) + + def __init__( + self, + name: str, + args: Tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Optional["Mark"] = None, + param_ids_generated: Optional[Sequence[str]] = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: "Mark") -> "Mark": + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Optional[Mark] = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) + + +@dataclasses.dataclass +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. + + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. 
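combined_with above appends positional args and merges kwargs, with the newer mark winning on conflicts; MarkDecorator.with_args (shown further below) is its main caller. The observable effect (the mark name is invented, so a configured pytest run would warn about it being unregistered):

    import pytest

    base = pytest.mark.flavor("vanilla", level=1)
    extended = base.with_args("chocolate", level=2)

    assert extended.args == ("vanilla", "chocolate")   # appended
    assert extended.kwargs == {"level": 2}             # merged, right side wins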
+ """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> Tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator": + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. + @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[misc] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> "MarkDecorator": + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks( + obj: Union[object, type], + *, + consider_mro: bool = True, +) -> List[Mark]: + """Obtain the unpacked marks that are stored on an object. + + If obj is a class and consider_mro is true, return marks applied to + this class and all of its super-classes in MRO order. If consider_mro + is false, only return marks applied directly to this class. + """ + if isinstance(obj, type): + if not consider_mro: + mark_lists = [obj.__dict__.get("pytestmark", [])] + else: + mark_lists = [ + x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__) + ] + mark_list = [] + for item in mark_lists: + if isinstance(item, list): + mark_list.extend(item) + else: + mark_list.append(item) + else: + mark_attribute = getattr(obj, "pytestmark", []) + if isinstance(mark_attribute, list): + mark_list = mark_attribute + else: + mark_list = [mark_attribute] + return list(normalize_mark_list(mark_list)) + + +def normalize_mark_list( + mark_list: Iterable[Union[Mark, MarkDecorator]] +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {repr(mark_obj)} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. + """ + assert isinstance(mark, Mark), mark + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] + + +# Typing for builtin pytest marks. 
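Because get_unpacked_marks above walks the reversed MRO, marks applied to base classes are seen by subclasses, superclass marks first. A small sketch against the vendored module (class and mark names invented; an actual run would warn about the unregistered marks):

    import pytest
    from _pytest.mark.structures import get_unpacked_marks

    @pytest.mark.base
    class TestBase:
        pass

    @pytest.mark.derived
    class TestDerived(TestBase):
        pass

    assert [m.name for m in get_unpacked_marks(TestDerived)] == ["base", "derived"]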
This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + from _pytest.scope import _ScopeName + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: + ... + + @overload + def __call__(self, reason: str = ...) -> "MarkDecorator": + ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + ) -> MarkDecorator: + ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: + ... + + @overload + def __call__( + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + run: bool = ..., + raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = ..., + strict: bool = ..., + ) -> MarkDecorator: + ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: Union[str, Sequence[str]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + *, + indirect: Union[bool, Sequence[str]] = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[Any], Optional[object]], + ] + ] = ..., + scope: Optional[_ScopeName] = ..., + ) -> MarkDecorator: + ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Optional[Config] = None + self._markers: Set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. + marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. 
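The "markers" ini lines that __getattr__ consults above are parsed by splitting on the first ":" and "(", exactly as the inline comment describes:

    for line in [
        "skipif(condition): skip the given test if...",
        "hypothesis: tests which use Hypothesis",
    ]:
        marker = line.split(":")[0].split("(")[0].strip()
        print(marker)   # skipif, then hypothesis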
+ if name not in self._markers: + if self._config.option.strict_markers or self._config.option.strict: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + warnings.warn( + "Unknown pytest.mark.%s - is this a typo? You can register " + "custom marks to avoid this warning - for details, see " + "https://docs.pytest.org/en/stable/how-to/mark.html" % name, + PytestUnknownMarkWarning, + 2, + ) + + return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True) + + +MARK_GEN = MarkGenerator(_ispytest=True) + + +@final +class NodeKeywords(MutableMapping[str, Any]): + __slots__ = ("node", "parent", "_markers") + + def __init__(self, node: "Node") -> None: + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key: str) -> Any: + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._markers[key] = value + + # Note: we could've avoided explicitly implementing some of the methods + # below and use the collections.abc fallback, but that would be slow. + + def __contains__(self, key: object) -> bool: + return ( + key in self._markers + or self.parent is not None + and key in self.parent.keywords + ) + + def update( # type: ignore[override] + self, + other: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]] = (), + **kwds: Any, + ) -> None: + self._markers.update(other) + self._markers.update(kwds) + + def __delitem__(self, key: str) -> None: + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self) -> Iterator[str]: + # Doesn't need to be fast. + yield from self._markers + if self.parent is not None: + for keyword in self.parent.keywords: + # self._marks and self.parent.keywords can have duplicates. + if keyword not in self._markers: + yield keyword + + def __len__(self) -> int: + # Doesn't need to be fast. + return sum(1 for keyword in self) + + def __repr__(self) -> str: + return f"" diff --git a/venv/lib/python3.12/site-packages/_pytest/monkeypatch.py b/venv/lib/python3.12/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000000..9e51ff3353 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,421 @@ +"""Monkeypatching and mocking functionality.""" +import os +import re +import sys +import warnings +from contextlib import contextmanager +from typing import Any +from typing import Generator +from typing import List +from typing import Mapping +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Tuple +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.fixtures import fixture +from _pytest.warning_types import PytestWarning + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +K = TypeVar("K") +V = TypeVar("V") + + +@fixture +def monkeypatch() -> Generator["MonkeyPatch", None, None]: + """A convenient fixture for monkey-patching. 
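A typical use of the fixture described below: patch an attribute and an environment variable inside a test, with every change reverted at teardown (the values are illustrative):

    import os

    def test_fake_home(monkeypatch):
        monkeypatch.setenv("HOME", "/fake/home")
        monkeypatch.setattr(os, "getcwd", lambda: "/fake/home")
        assert os.environ["HOME"] == "/fake/home"
        assert os.getcwd() == "/fake/home"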
+ + The fixture provides these methods to modify objects, dictionaries, or + :data:`os.environ`: + + * :meth:`monkeypatch.setattr(obj, name, value, raising=True) ` + * :meth:`monkeypatch.delattr(obj, name, raising=True) ` + * :meth:`monkeypatch.setitem(mapping, name, value) ` + * :meth:`monkeypatch.delitem(obj, name, raising=True) ` + * :meth:`monkeypatch.setenv(name, value, prepend=None) ` + * :meth:`monkeypatch.delenv(name, raising=True) ` + * :meth:`monkeypatch.syspath_prepend(path) ` + * :meth:`monkeypatch.chdir(path) ` + * :meth:`monkeypatch.context() ` + + All modifications will be undone after the requesting test function or + fixture has finished. The ``raising`` parameter determines if a :class:`KeyError` + or :class:`AttributeError` will be raised if the set/deletion operation does not have the + specified target. + + To undo modifications done by the fixture in a contained scope, + use :meth:`context() `. + """ + mpatch = MonkeyPatch() + yield mpatch + mpatch.undo() + + +def resolve(name: str) -> object: + # Simplified from zope.dottedname. + parts = name.split(".") + + used = parts.pop(0) + found: object = __import__(used) + for part in parts: + used += "." + part + try: + found = getattr(found, part) + except AttributeError: + pass + else: + continue + # We use explicit un-nesting of the handling block in order + # to avoid nested exceptions. + try: + __import__(used) + except ImportError as ex: + expected = str(ex).split()[-1] + if expected == used: + raise + else: + raise ImportError(f"import error in {used}: {ex}") from ex + found = annotated_getattr(found, part, used) + return found + + +def annotated_getattr(obj: object, name: str, ann: str) -> object: + try: + obj = getattr(obj, name) + except AttributeError as e: + raise AttributeError( + "{!r} object at {} has no attribute {!r}".format( + type(obj).__name__, ann, name + ) + ) from e + return obj + + +def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]: + if not isinstance(import_path, str) or "." not in import_path: + raise TypeError(f"must be absolute import path string, not {import_path!r}") + module, attr = import_path.rsplit(".", 1) + target = resolve(module) + if raising: + annotated_getattr(target, attr, ann=module) + return attr, target + + +class Notset: + def __repr__(self) -> str: + return "" + + +notset = Notset() + + +@final +class MonkeyPatch: + """Helper to conveniently monkeypatch attributes/items/environment + variables/syspath. + + Returned by the :fixture:`monkeypatch` fixture. + + .. versionchanged:: 6.2 + Can now also be used directly as `pytest.MonkeyPatch()`, for when + the fixture is not available. In this case, use + :meth:`with MonkeyPatch.context() as mp: ` or remember to call + :meth:`undo` explicitly. + """ + + def __init__(self) -> None: + self._setattr: List[Tuple[object, str, object]] = [] + self._setitem: List[Tuple[Mapping[Any, Any], object, object]] = [] + self._cwd: Optional[str] = None + self._savesyspath: Optional[List[str]] = None + + @classmethod + @contextmanager + def context(cls) -> Generator["MonkeyPatch", None, None]: + """Context manager that returns a new :class:`MonkeyPatch` object + which undoes any patching done inside the ``with`` block upon exit. + + Example: + + .. 
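derive_importpath above is what makes the string form of setattr work: the dotted path is resolved to a module object plus an attribute name. The two spellings below are equivalent:

    import os

    def test_cwd(monkeypatch):
        monkeypatch.setattr(os, "getcwd", lambda: "/")     # object target
        monkeypatch.setattr("os.getcwd", lambda: "/")      # dotted-string target
        assert os.getcwd() == "/"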
code-block:: python + + import functools + + + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see :issue:`3290`). + """ + m = cls() + try: + yield m + finally: + m.undo() + + @overload + def setattr( + self, + target: str, + name: object, + value: Notset = ..., + raising: bool = ..., + ) -> None: + ... + + @overload + def setattr( + self, + target: object, + name: str, + value: object, + raising: bool = ..., + ) -> None: + ... + + def setattr( + self, + target: Union[str, object], + name: Union[object, str], + value: object = notset, + raising: bool = True, + ) -> None: + """ + Set attribute value on target, memorizing the old value. + + For example: + + .. code-block:: python + + import os + + monkeypatch.setattr(os, "getcwd", lambda: "/") + + The code above replaces the :func:`os.getcwd` function by a ``lambda`` which + always returns ``"/"``. + + For convenience, you can specify a string as ``target`` which + will be interpreted as a dotted import path, with the last part + being the attribute name: + + .. code-block:: python + + monkeypatch.setattr("os.getcwd", lambda: "/") + + Raises :class:`AttributeError` if the attribute does not exist, unless + ``raising`` is set to False. + + **Where to patch** + + ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one. + There can be many names pointing to any individual object, so for patching to work you must ensure + that you patch the name used by the system under test. + + See the section :ref:`Where to patch ` in the :mod:`unittest.mock` + docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but + applies to ``monkeypatch.setattr`` as well. + """ + __tracebackhide__ = True + import inspect + + if isinstance(value, Notset): + if not isinstance(target, str): + raise TypeError( + "use setattr(target, name, value) or " + "setattr(target, value) with target being a dotted " + "import string" + ) + value = name + name, target = derive_importpath(target, raising) + else: + if not isinstance(name, str): + raise TypeError( + "use setattr(target, name, value) with name being a string or " + "setattr(target, value) with target being a dotted " + "import string" + ) + + oldval = getattr(target, name, notset) + if raising and oldval is notset: + raise AttributeError(f"{target!r} has no attribute {name!r}") + + # avoid class descriptors like staticmethod/classmethod + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + setattr(target, name, value) + + def delattr( + self, + target: Union[object, str], + name: Union[str, Notset] = notset, + raising: bool = True, + ) -> None: + """Delete attribute ``name`` from ``target``. + + If no ``name`` is specified and ``target`` is a string + it will be interpreted as a dotted import path with the + last part being the attribute name. + + Raises AttributeError it the attribute does not exist, unless + ``raising`` is set to False. 
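The "Where to patch" note above is easy to trip over once a name has been imported with "from ... import". A self-contained sketch using two simulated modules (built with types.ModuleType purely for illustration):

    import types

    helpers = types.ModuleType("helpers")
    helpers.fetch = lambda: "real"

    app = types.ModuleType("app")
    app.fetch = helpers.fetch            # simulates: from helpers import fetch
    app.run = lambda: app.fetch()

    def test_run(monkeypatch):
        # Patch the name app.run actually looks up: app.fetch, not helpers.fetch.
        monkeypatch.setattr(app, "fetch", lambda: "fake")
        assert app.run() == "fake"
        assert helpers.fetch() == "real"   # the original is untouched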
+ """ + __tracebackhide__ = True + import inspect + + if isinstance(name, Notset): + if not isinstance(target, str): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None: + """Set dictionary entry ``name`` to value.""" + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dic[name] = value # type: ignore[index] + + def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None: + """Delete ``name`` from dict. + + Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to + False. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dic[name] # type: ignore[attr-defined] + + def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + "Value of environment variable {name} type should be str, but got " + "{value!r} (type: {type}); converted to str implicitly".format( + name=name, value=value, type=type(value).__name__ + ) + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. + """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + from pkg_resources import fixup_namespace_packages + + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path: Union[str, "os.PathLike[str]"]) -> None: + """Change the current working directory to the specified path. 
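setenv above supports a prepend separator, which is the convenient form for PATH-like variables:

    import os

    def test_tool_on_path(monkeypatch):
        monkeypatch.setenv("PATH", "/opt/tool/bin", prepend=os.pathsep)
        # The new value sits in front, joined to the old one by os.pathsep
        # (assumes PATH was already set, which it virtually always is).
        assert os.environ["PATH"].startswith("/opt/tool/bin" + os.pathsep)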
+ + :param path: + The path to change into. + """ + if self._cwd is None: + self._cwd = os.getcwd() + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + .. note:: + The same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + + Prefer to use :meth:`context() ` instead. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dictionary[key] # type: ignore[attr-defined] + except KeyError: + pass # Was already deleted, so we have the desired state. + else: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dictionary[key] = value # type: ignore[index] + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/venv/lib/python3.12/site-packages/_pytest/nodes.py b/venv/lib/python3.12/site-packages/_pytest/nodes.py new file mode 100644 index 0000000000..a5313cb765 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/nodes.py @@ -0,0 +1,783 @@ +import os +import warnings +from inspect import signature +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterable +from typing import Iterator +from typing import List +from typing import MutableMapping +from typing import Optional +from typing import overload +from typing import Set +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest.compat import cached_property +from _pytest.compat import LEGACY_PATH +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + +if TYPE_CHECKING: + # Imported here due to circular import. + from _pytest.main import Session + from _pytest._code.code import _TracebackStyle + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +def iterparentnodeids(nodeid: str) -> Iterator[str]: + """Return the parent node IDs of a given node ID, inclusive. 
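+
+# ---- editor's example: not part of the vendored file --------------------------
+# undo() normally runs during fixture teardown; for finer scoping the docstring
+# above points to context(), which undoes its changes on block exit (this
+# mirrors the example in the pytest documentation):
+import functools
+
+def test_partial(monkeypatch):
+    with monkeypatch.context() as m:
+        m.setattr(functools, "partial", 3)
+        assert functools.partial == 3
+    assert callable(functools.partial)  # already restored here
+# --------------------------------------------------------------------------------
+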
+ + For the node ID + + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + the result would be + + "" + "testing" + "testing/code" + "testing/code/test_excinfo.py" + "testing/code/test_excinfo.py::TestFormattedExcinfo" + "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source" + + Note that / components are only considered until the first ::. + """ + pos = 0 + first_colons: Optional[int] = nodeid.find("::") + if first_colons == -1: + first_colons = None + # The root Session node - always present. + yield "" + # Eagerly consume SEP parts until first colons. + while True: + at = nodeid.find(SEP, pos, first_colons) + if at == -1: + break + if at > 0: + yield nodeid[:at] + pos = at + len(SEP) + # Eagerly consume :: parts. + while True: + at = nodeid.find("::", pos) + if at == -1: + break + if at > 0: + yield nodeid[:at] + pos = at + len("::") + # The node ID itself. + if nodeid: + yield nodeid + + +def _check_path(path: Path, fspath: LEGACY_PATH) -> None: + if Path(fspath) != path: + raise ValueError( + f"Path({fspath!r}) != {path!r}\n" + "if both path and fspath are given they need to be equal" + ) + + +def _imply_path( + node_type: Type["Node"], + path: Optional[Path], + fspath: Optional[LEGACY_PATH], +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(type): + def __call__(self, *k, **kw): + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=f"{self.__module__}.{self.__name__}") + fail(msg, pytrace=False) + + def _create(self, *k, **kw): + try: + return super().__call__(*k, **kw) + except TypeError: + sig = signature(getattr(self, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{self} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) + + +class Node(metaclass=NodeMeta): + r"""Base class of :class:`Collector` and :class:`Item`, the components of + the test collection tree. + + ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the + leaf nodes. + """ + + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer + #: using :attr:`path` instead. + fspath: LEGACY_PATH + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. 
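+
+# ---- editor's example: not part of the vendored file --------------------------
+# What iterparentnodeids() yields for a typical node ID, per the algorithm
+# above ("/" parts are only split before the first "::"); the import is from
+# pytest's internal module:
+from _pytest.nodes import iterparentnodeids
+
+assert list(iterparentnodeids("tests/test_x.py::TestA::test_b")) == [
+    "",
+    "tests",
+    "tests/test_x.py",
+    "tests/test_x.py::TestA",
+    "tests/test_x.py::TestA::test_b",
+]
+# --------------------------------------------------------------------------------
+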
+ __slots__ = ( + "name", + "parent", + "config", + "session", + "path", + "_nodeid", + "_store", + "__dict__", + ) + + def __init__( + self, + name: str, + parent: "Optional[Node]" = None, + config: Optional[Config] = None, + session: "Optional[Session]" = None, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, + nodeid: Optional[str] = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name: str = name + + #: The parent collector node. + self.parent = parent + + if config: + #: The pytest config object. + self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + if session: + #: The pytest session this node is part of. + self.session: Session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + if path is None and fspath is None: + path = getattr(parent, "path", None) + #: Filesystem path where this node was collected from (can be None). + self.path: Path = _imply_path(type(self), path, fspath=fspath) + + # The explicit annotation is to avoid publicly exposing NodeKeywords. + #: Keywords/markers collected from all scopes. + self.keywords: MutableMapping[str, Any] = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: List[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: Set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + "::" + self.name + + #: A place where plugins can store information on the node for their + #: own use. + self.stash: Stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + @classmethod + def from_parent(cls, parent: "Node", **kw): + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self): + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.path) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. 
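+
+# ---- editor's example: not part of the vendored file --------------------------
+# Node subclasses take cooperative keyword arguments and are constructed via
+# from_parent(); calling MyItem(...) directly trips NodeMeta.__call__ above.
+# MyItem and `extra` are hypothetical:
+import pytest
+
+class MyItem(pytest.Item):
+    def __init__(self, *, extra, **kwargs):
+        super().__init__(**kwargs)
+        self.extra = extra
+
+    def runtest(self):
+        assert self.extra == 42
+
+# inside a collector:  MyItem.from_parent(parent, name="my_item", extra=42)
+# --------------------------------------------------------------------------------
+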
+ if not isinstance(warning, Warning): + raise ValueError( + "warning must be an instance of Warning or subclass, got {!r}".format( + warning + ) + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1, + ) + + # Methods for ordering nodes. + + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def listchain(self) -> List["Node"]: + """Return list of all parent collectors up to self, starting from + the root of collection tree. + + :returns: The nodes. + """ + chain = [] + item: Optional[Node] = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker( + self, marker: Union[str, MarkDecorator], append: bool = True + ) -> None: + """Dynamically add a marker object to the node. + + :param marker: + The marker. + :param append: + Whether to append the marker, or prepend it. + """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of the markers of the node. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: Optional[str] = None + ) -> Iterator[Tuple["Node", Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Optional[Mark]: + ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: + ... + + def get_closest_marker( + self, name: str, default: Optional[Mark] = None + ) -> Optional[Mark]: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> Set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: Set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> List[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called without arguments when this node is + finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). 
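+
+# ---- editor's example: not part of the vendored file --------------------------
+# Typical use of the marker API above from a conftest.py hook; the "slow" and
+# "timeout" marker names are hypothetical:
+import pytest
+
+def pytest_collection_modifyitems(items):
+    for item in items:
+        if "slow" in item.nodeid:
+            item.add_marker(pytest.mark.slow)         # appended to own_markers
+        timeout = item.get_closest_marker("timeout")  # closest definition wins
+        if timeout is not None:
+            print(item.nodeid, timeout.args)
+# --------------------------------------------------------------------------------
+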
+ """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]: + """Get the next parent node (including self) which is an instance of + the given class. + + :param cls: The node class to search for. + :returns: The node, if found. + """ + current: Optional[Node] = self + while current and not isinstance(current, cls): + current = current.parent + assert current is None or isinstance(current, cls) + return current + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + return excinfo.traceback + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo.from_exc_info(excinfo.value.excinfo) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + + tbfilter: Union[bool, Callable[[ExceptionInfo[BaseException]], Traceback]] + if self.config.getoption("fulltrace", False): + style = "long" + tbfilter = False + else: + tbfilter = self._traceback_filter + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.getoption("verbose", 0) > 1: + truncate_locals = False + else: + truncate_locals = True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: "Optional[_TracebackStyle]" = None, + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection or test failure. + + .. seealso:: :ref:`non-python tests` + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[int]]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "fspath": just a path + + :rtype: A tuple of (str|Path, int) with filename and 0-based line number. + """ + # See Item.location. + location: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "fspath", "unknown location"), -1 + + +class Collector(Node): + """Base class of all collectors. 
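+
+# ---- editor's example: not part of the vendored file --------------------------
+# getparent() walks up the collection tree; a common use is locating the
+# enclosing class of an item from a hook:
+import pytest
+
+def pytest_runtest_setup(item):
+    cls = item.getparent(pytest.Class)  # None for plain test functions
+    if cls is not None:
+        print(f"{item.name} is defined in class {cls.name}")
+# --------------------------------------------------------------------------------
+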
+ + Collector create children through `collect()` and thus iteratively build + the collection tree. + """ + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + def collect(self) -> Iterable[Union["Item", "Collector"]]: + """Collect children (items and collectors) for this collector.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> Union[str, TerminalRepr]: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). + tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + return ntraceback.filter(excinfo) + return excinfo.traceback + + +def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[str]: + for initial_path in session._initialpaths: + if commonpath(path, initial_path) == initial_path: + rel = str(path.relative_to(initial_path)) + return "" if rel == "." else rel + return None + + +class FSCollector(Collector): + """Base class for filesystem collectors.""" + + def __init__( + self, + fspath: Optional[LEGACY_PATH] = None, + path_or_parent: Optional[Union[Path, Node]] = None, + path: Optional[Path] = None, + name: Optional[str] = None, + parent: Optional[Node] = None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: Optional[LEGACY_PATH] = None, + path: Optional[Path] = None, + **kw, + ): + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + def gethookproxy(self, fspath: "os.PathLike[str]"): + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.gethookproxy(fspath) + + def 
isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool: + warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2) + return self.session.isinitpath(path) + + +class File(FSCollector): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Item(Node): + """Base class of all test invocation items. + + Note that for a single function there might be multiple test invocation items. + """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Optional[Config] = None, + session: Optional["Session"] = None, + nodeid: Optional[str] = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: List[Tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: List[Tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. + """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + """Get location information for this item for test reports. 
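+
+# ---- editor's example: not part of the vendored file --------------------------
+# A sketch of the File/Item pairing described above, in the style of the
+# "non-python tests" docs; LineFile/LineItem and the pass criterion are
+# hypothetical, and a pytest_collect_file hook would wire LineFile in:
+import pytest
+
+class LineItem(pytest.Item):
+    def __init__(self, *, line, **kwargs):
+        super().__init__(**kwargs)
+        self.line = line
+
+    def runtest(self):
+        assert self.line.strip(), "blank line"
+
+    def reportinfo(self):
+        return self.path, 0, f"line: {self.name}"
+
+class LineFile(pytest.File):
+    def collect(self):
+        for i, raw in enumerate(self.path.read_text().splitlines()):
+            yield LineItem.from_parent(self, name=f"line{i}", line=raw)
+# --------------------------------------------------------------------------------
+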
+ + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The 0-based line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> Tuple[str, Optional[int], str]: + """ + Returns a tuple of ``(relfspath, lineno, testname)`` for this item + where ``relfspath`` is file path relative to ``config.rootpath`` + and lineno is a 0-based line number. + """ + location = self.reportinfo() + path = absolutepath(os.fspath(location[0])) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/venv/lib/python3.12/site-packages/_pytest/nose.py b/venv/lib/python3.12/site-packages/_pytest/nose.py new file mode 100644 index 0000000000..273bd045fb --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/nose.py @@ -0,0 +1,50 @@ +"""Run testsuites written for nose.""" +import warnings + +from _pytest.config import hookimpl +from _pytest.deprecated import NOSE_SUPPORT +from _pytest.fixtures import getfixturemarker +from _pytest.nodes import Item +from _pytest.python import Function +from _pytest.unittest import TestCaseFunction + + +@hookimpl(trylast=True) +def pytest_runtest_setup(item: Item) -> None: + if not isinstance(item, Function): + return + # Don't do nose style setup/teardown on direct unittest style classes. + if isinstance(item, TestCaseFunction): + return + + # Capture the narrowed type of item for the teardown closure, + # see https://github.com/python/mypy/issues/2608 + func = item + + call_optional(func.obj, "setup", func.nodeid) + func.addfinalizer(lambda: call_optional(func.obj, "teardown", func.nodeid)) + + # NOTE: Module- and class-level fixtures are handled in python.py + # with `pluginmanager.has_plugin("nose")` checks. + # It would have been nicer to implement them outside of core, but + # it's not straightforward. + + +def call_optional(obj: object, name: str, nodeid: str) -> bool: + method = getattr(obj, name, None) + if method is None: + return False + is_fixture = getfixturemarker(method) is not None + if is_fixture: + return False + if not callable(method): + return False + # Warn about deprecation of this plugin. + method_name = getattr(method, "__name__", str(method)) + warnings.warn( + NOSE_SUPPORT.format(nodeid=nodeid, method=method_name, stage=name), stacklevel=2 + ) + # If there are any problems allow the exception to raise rather than + # silently ignoring it. + method() + return True diff --git a/venv/lib/python3.12/site-packages/_pytest/outcomes.py b/venv/lib/python3.12/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000000..53c3e1511c --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/outcomes.py @@ -0,0 +1,311 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" +import sys +import warnings +from typing import Any +from typing import Callable +from typing import cast +from typing import NoReturn +from typing import Optional +from typing import Type +from typing import TypeVar + +from _pytest.deprecated import KEYWORD_MSG_ARG + +TYPE_CHECKING = False # Avoid circular import through compat. + +if TYPE_CHECKING: + from typing_extensions import Protocol +else: + # typing.Protocol is only available starting from Python 3.8. 
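+
+# ---- editor's example: not part of the vendored file --------------------------
+# The shim above implements nose's function-attribute protocol: a callable
+# attached to the test function as `setup`/`teardown` is found by
+# call_optional(func.obj, ...) and run around the test (deprecated):
+def _prepare():
+    test_legacy.ready = True
+
+def test_legacy():
+    assert test_legacy.ready
+
+test_legacy.setup = _prepare  # what call_optional() looks up
+# --------------------------------------------------------------------------------
+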
It is also + # available from typing_extensions, but we don't want a runtime dependency + # on that. So use a dummy runtime implementation. + from typing import Generic + + Protocol = Generic + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: Optional[str] = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: Optional[int] = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# Elaborate hack to work around https://github.com/python/mypy/issues/2087. +# Ideally would just be `exit.Exception = Exit` etc. + +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=Type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit( + reason: str = "", returncode: Optional[int] = None, *, msg: Optional[str] = None +) -> NoReturn: + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. None means the same as ``0`` (no error), same as :func:`sys.exit`. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + """ + __tracebackhide__ = True + from _pytest.config import UsageError + + if reason and msg: + raise UsageError( + "cannot pass reason and msg to exit(), `msg` is deprecated, use `reason`." 
+ ) + if not reason: + if msg is None: + raise UsageError("exit() requires a reason argument") + warnings.warn(KEYWORD_MSG_ARG.format(func="exit"), stacklevel=2) + reason = msg + raise Exit(reason, returncode) + + +@_with_exception(Skipped) +def skip( + reason: str = "", *, allow_module_level: bool = False, msg: Optional[str] = None +) -> NoReturn: + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level. + Raising the skip exception at module level will stop + the execution of the module and prevent the collection of all tests in the module, + even those defined before the `skip` call. + + Defaults to False. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + __tracebackhide__ = True + reason = _resolve_msg_to_reason("skip", reason, msg) + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail(reason: str = "", pytrace: bool = True, msg: Optional[str] = None) -> NoReturn: + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :param msg: + Same as ``reason``, but deprecated. Will be removed in a future version, use ``reason`` instead. + """ + __tracebackhide__ = True + reason = _resolve_msg_to_reason("fail", reason, msg) + raise Failed(msg=reason, pytrace=pytrace) + + +def _resolve_msg_to_reason( + func_name: str, reason: str, msg: Optional[str] = None +) -> str: + """ + Handles converting the deprecated msg parameter if provided into + reason, raising a deprecation warning. This function will be removed + when the optional msg argument is removed from here in future. + + :param str func_name: + The name of the offending function, this is formatted into the deprecation message. + + :param str reason: + The reason= passed into either pytest.fail() or pytest.skip() + + :param str msg: + The msg= passed into either pytest.fail() or pytest.skip(). This will + be converted into reason if it is provided to allow pytest.skip(msg=) or + pytest.fail(msg=) to continue working in the interim period. + + :returns: + The value to use as reason. + + """ + __tracebackhide__ = True + if msg is not None: + if reason: + from pytest import UsageError + + raise UsageError( + f"Passing both ``reason`` and ``msg`` to pytest.{func_name}(...) is not permitted." + ) + warnings.warn(KEYWORD_MSG_ARG.format(func=func_name), stacklevel=3) + reason = msg + return reason + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> NoReturn: + """Imperatively xfail an executing test or setup function with the given reason. 
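+
+# ---- editor's example: not part of the vendored file --------------------------
+# The outcome helpers above in everyday use; the computation is a stand-in:
+import sys
+import pytest
+
+if sys.platform.startswith("win"):
+    pytest.skip("POSIX-only tests", allow_module_level=True)
+
+def test_compute():
+    result = 42  # stand-in for a real computation
+    if result != 42:
+        pytest.fail("wrong answer", pytrace=False)
+
+# pytest.exit("infrastructure down", returncode=2) would abort the whole run.
+# --------------------------------------------------------------------------------
+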
+ + This function should be called only during testing (setup, call or teardown). + + :param reason: + The message to show the user as reason for the xfail. + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, minversion: Optional[str] = None, reason: Optional[str] = None +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param modname: + The name of the module to import. + :param minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param reason: + If given, this reason is shown as the message when the module cannot + be imported. + + :returns: + The imported module. This should be assigned to its canonical name. + + Example:: + + docutils = pytest.importorskip("docutils") + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + try: + __import__(modname) + except ImportError as exc: + if reason is None: + reason = f"could not import {modname!r}: {exc}" + raise Skipped(reason, allow_module_level=True) from None + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. + from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + "module %r has __version__ %r, required is: %r" + % (modname, verattr, minversion), + allow_module_level=True, + ) + return mod diff --git a/venv/lib/python3.12/site-packages/_pytest/pastebin.py b/venv/lib/python3.12/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000000..22c7a62237 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/pastebin.py @@ -0,0 +1,110 @@ +"""Submit failure or test session information to a pastebin service.""" +import tempfile +from io import StringIO +from typing import IO +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="Send failed|all info to bpaste.net pastebin service", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. 
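+
+# ---- editor's example: not part of the vendored file --------------------------
+# Module-level guards built on importorskip(); the version bound is
+# illustrative:
+import pytest
+
+docutils = pytest.importorskip("docutils")
+numpy = pytest.importorskip(
+    "numpy", minversion="1.20", reason="needs numpy >= 1.20"
+)
+# --------------------------------------------------------------------------------
+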
+ config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line("pastebin session-log: %s\n" % pastebinurl) + + +def create_new_paste(contents: Union[str, bytes]) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. + """ + import re + from urllib.request import urlopen + from urllib.parse import urlencode + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except OSError as exc_info: # urllib errors + return "bad response: %s" % exc_info + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/venv/lib/python3.12/site-packages/_pytest/pathlib.py b/venv/lib/python3.12/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000000..c2f8535f5f --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/pathlib.py @@ -0,0 +1,804 @@ +import atexit +import contextlib +import fnmatch +import importlib.util +import itertools +import os +import shutil +import sys +import types +import uuid +import warnings +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +from functools import partial +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +from types import ModuleType +from typing import Callable +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from 
_pytest.warning_types import PytestWarning + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception): + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error( + func, + path: str, + excinfo: Union[ + BaseException, + Tuple[Type[BaseException], BaseException, Optional[types.TracebackType]], + ], + *, + start_path: Path, +) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. + """ + if isinstance(excinfo, BaseException): + exc = excinfo + else: + exc = excinfo[1] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). + # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(exc, FileNotFoundError): + return False + + if not isinstance(exc, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + "(rm_rf) unknown function {} when removing {}:\n{}: {}".format( + func, path, type(exc), exc + ) + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. 
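+
+# ---- editor's example: not part of the vendored file --------------------------
+# The core of on_rm_rf_error()'s PermissionError branch above, as a standalone
+# sketch: chmod u+rw on the offending path, then retry the failed
+# os.unlink/os.rmdir call once:
+import os
+import stat
+
+def _make_writable_and_retry(func, path: str) -> None:
+    os.chmod(path, os.stat(path).st_mode | stat.S_IRUSR | stat.S_IWUSR)
+    func(path)  # retry the original operation
+# --------------------------------------------------------------------------------
+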
+ """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + if sys.version_info >= (3, 12): + shutil.rmtree(str(path), onexc=onerror) + else: + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[Path]: + """Find all elements in root that begin with the prefix, case insensitive.""" + l_prefix = prefix.lower() + for x in root.iterdir(): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. + """ + p_len = len(prefix) + for p in iter: + yield p.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink( + root: Path, target: Union[str, PurePath], link_to: Union[str, Path] +) -> None: + """Helper to create the current symlink. + + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. 
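+
+# ---- editor's example: not part of the vendored file --------------------------
+# Prefix behaviour of get_extended_length_path_str(), per the code above:
+from _pytest.pathlib import get_extended_length_path_str
+
+assert get_extended_length_path_str("C:\\tmp\\x") == "\\\\?\\C:\\tmp\\x"
+assert get_extended_length_path_str("\\\\srv\\share") == "\\\\?\\UNC\\srv\\share"
+assert get_extended_length_path_str("\\\\?\\C:\\x") == "\\\\?\\C:\\x"  # unchanged
+# --------------------------------------------------------------------------------
+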
+ """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root) + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal(lock_path: Path, register=atexit.register): + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. + if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). + return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. 
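+
+# ---- editor's example: not part of the vendored file --------------------------
+# make_numbered_dir() on a fresh (hypothetical) root; suffixes count up and a
+# best-effort "<prefix>current" symlink tracks the newest directory:
+from pathlib import Path
+from _pytest.pathlib import make_numbered_dir
+
+root = Path("/tmp/demo")
+root.mkdir(parents=True, exist_ok=True)
+assert make_numbered_dir(root, "run-").name == "run-0"
+assert make_numbered_dir(root, "run-").name == "run-1"
+# --------------------------------------------------------------------------------
+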
+ with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + paths = find_prefixed(root, prefix) + paths, paths2 = itertools.tee(paths) + numbers = map(parse_num, extract_suffixes(paths2, prefix)) + for path, number in zip(paths, numbers): + if number <= max_delete: + yield path + + +def cleanup_dead_symlinks(root: Path): + for left_dir in root.iterdir(): + if left_dir.is_symlink(): + if not left_dir.resolve().exists(): + left_dir.unlink() + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + if not root.exists(): + return + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + cleanup_dead_symlinks(root) + + +def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + # Only lock the current dir when keep is not 0 + if keep != 0: + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path: Union[str, "os.PathLike[str]"]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. + + The difference between this algorithm and PurePath.match() is that the + latter matches "**" glob expressions for each part of the path, while + this algorithm uses the whole path instead. + + For example: + "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" + with this algorithm, but not with PurePath.match(). + + This algorithm was ported to keep backward-compatibility with existing + settings which assume paths match according this logic. + + References: + * https://bugs.python.org/issue29249 + * https://bugs.python.org/issue34731 + """ + path = PurePath(path) + iswin32 = sys.platform.startswith("win") + + if iswin32 and sep not in pattern and posix_sep in pattern: + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. 
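+
+# ---- editor's example: not part of the vendored file --------------------------
+# cleanup_candidates() keeps the newest `keep` dirs: with suffixes 0..9 and
+# keep=3, max_delete = 9 - 3 = 6, so run-0 .. run-6 are yielded for removal.
+# resolve_from_str() anchors relative strings on the rootpath:
+from pathlib import Path
+from _pytest.pathlib import resolve_from_str
+
+assert resolve_from_str("/abs/cache", Path("/project")) == Path("/abs/cache")
+assert resolve_from_str("rel/cache", Path("/project")) == Path("/project/rel/cache")
+# --------------------------------------------------------------------------------
+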
+ pattern = pattern.replace(posix_sep, sep) + + if sep not in pattern: + name = path.name + else: + name = str(path) + if path.is_absolute() and not os.path.isabs(pattern): + pattern = f"*{os.sep}{pattern}" + return fnmatch.fnmatch(name, pattern) + + +def parts(s: str) -> Set[str]: + parts = s.split(sep) + return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} + + +def symlink_or_skip(src, dst, **kwargs): + """Make a symlink, or skip the test in case symlinks are not supported.""" + try: + os.symlink(str(src), str(dst), **kwargs) + except OSError as e: + skip(f"symlinks not supported: {e}") + + +class ImportMode(Enum): + """Possible values for `mode` parameter of `import_path`.""" + + prepend = "prepend" + append = "append" + importlib = "importlib" + + +class ImportPathMismatchError(ImportError): + """Raised on import_path() if there is a mismatch of __file__'s. + + This can happen when `import_path` is called multiple times with different filenames that has + the same basename but reside in packages + (for example "/tests1/test_foo.py" and "/tests2/test_foo.py"). + """ + + +def import_path( + p: Union[str, "os.PathLike[str]"], + *, + mode: Union[str, ImportMode] = ImportMode.prepend, + root: Path, +) -> ModuleType: + """Import and return a module from the given path, which can be a file (a module) or + a directory (a package). + + The import mechanism used is controlled by the `mode` parameter: + + * `mode == ImportMode.prepend`: the directory containing the module (or package, taking + `__init__.py` files into account) will be put at the *start* of `sys.path` before + being imported with `importlib.import_module`. + + * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended + to the end of `sys.path`, if not already in `sys.path`. + + * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib` + to import the module, which avoids having to muck with `sys.path` at all. It effectively + allows having same-named test modules in different places. + + :param root: + Used as an anchor when mode == ImportMode.importlib to obtain + a unique name for the module being imported so it can safely be stored + into ``sys.modules``. + + :raises ImportPathMismatchError: + If after importing the given `path` and the module `__file__` + are different. Only raised in `prepend` and `append` modes. 
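+
+# ---- editor's example: not part of the vendored file --------------------------
+# fnmatch_ex() matches "**" against the whole path (unlike PurePath.match()),
+# and falls back to basename matching when the pattern has no separator:
+from _pytest.pathlib import fnmatch_ex
+
+assert fnmatch_ex("test_*.py", "tests/sub/test_foo.py")  # basename match
+assert fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py")
+# --------------------------------------------------------------------------------
+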
+ """ + mode = ImportMode(mode) + + path = Path(p) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + module_name = module_name_from_path(path, root) + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + for meta_importer in sys.meta_path: + spec = meta_importer.find_spec(module_name, [str(path.parent)]) + if spec is not None: + break + else: + spec = importlib.util.spec_from_file_location(module_name, str(path)) + + if spec is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + mod = importlib.util.module_from_spec(spec) + sys.modules[module_name] = mod + spec.loader.exec_module(mod) # type: ignore[union-attr] + insert_missing_modules(sys.modules, module_name) + return mod + + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + names = list(path.with_suffix("").relative_to(pkg_root).parts) + if names[-1] == "__init__": + names.pop() + module_name = ".".join(names) + else: + pkg_root = path.parent + module_name = path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.sep + "__init__.py"): + module_file = module_file[: -(len(os.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +# Implement a special _is_same function on Windows which returns True if the two filenames +# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678). +if sys.platform.startswith("win"): + + def _is_same(f1: str, f2: str) -> bool: + return Path(f1) == Path(f2) or os.path.samefile(f1, f2) + +else: + + def _is_same(f1: str, f2: str) -> bool: + return os.path.samefile(f1, f2) + + +def module_name_from_path(path: Path, root: Path) -> str: + """ + Return a dotted module name based on the given path, anchored on root. + + For example: path="projects/src/tests/test_foo.py" and root="/projects", the + resulting module name will be "src.tests.test_foo". + """ + path = path.with_suffix("") + try: + relative_path = path.relative_to(root) + except ValueError: + # If we can't get a relative path to root, use the full path, except + # for the first part ("d:\\" or "/" depending on the platform, for example). + path_parts = path.parts[1:] + else: + # Use the parts for the relative path to the root path. + path_parts = relative_path.parts + + # Module name for packages do not contain the __init__ file, unless + # the `__init__.py` file is at the root. 
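+
+# ---- editor's example: not part of the vendored file --------------------------
+# importlib mode in use; the project layout is hypothetical:
+from pathlib import Path
+from _pytest.pathlib import import_path, ImportMode
+
+root = Path("/project")  # contains src/tests/test_foo.py
+mod = import_path(
+    root / "src" / "tests" / "test_foo.py", mode=ImportMode.importlib, root=root
+)
+# imported under the unique name "src.tests.test_foo"; sys.path is untouched
+# --------------------------------------------------------------------------------
+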
+ if len(path_parts) >= 2 and path_parts[-1] == "__init__": + path_parts = path_parts[:-1] + + return ".".join(path_parts) + + +def insert_missing_modules(modules: Dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. + + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + child_module: Union[ModuleType, None] = None + module: Union[ModuleType, None] = None + child_name: str = "" + while module_name: + if module_name not in modules: + try: + # If sys.meta_path is empty, calling import_module will issue + # a warning and raise ModuleNotFoundError. To avoid the + # warning, we check sys.meta_path explicitly and raise the error + # ourselves to fall back to creating a dummy module. + if not sys.meta_path: + raise ModuleNotFoundError + module = importlib.import_module(module_name) + except ModuleNotFoundError: + module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + else: + module = modules[module_name] + if child_module: + # Add child attribute to the parent that can reference the child + # modules. + if not hasattr(module, child_name): + setattr(module, child_name, child_module) + modules[module_name] = module + # Keep track of the child module while moving up the tree. + child_module, child_name = module, module_name.rpartition(".")[-1] + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Optional[Path]: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it can not be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not parent.joinpath("__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def scandir(path: Union[str, "os.PathLike[str]"]) -> List["os.DirEntry[str]"]: + """Scan a directory recursively, in breadth-first order. + + The returned entries are sorted. + """ + entries = [] + with os.scandir(path) as s: + # Skip entries with symlink loops and other brokenness, so the caller + # doesn't have to deal with it. + for entry in s: + try: + entry.is_file() + except OSError as err: + if _ignore_error(err): + continue + raise + entries.append(entry) + entries.sort(key=lambda entry: entry.name) + return entries + + +def visit( + path: Union[str, "os.PathLike[str]"], recurse: Callable[["os.DirEntry[str]"], bool] +) -> Iterator["os.DirEntry[str]"]: + """Walk a directory recursively, in breadth-first order. + + The `recurse` predicate determines whether a directory is recursed. + + Entries at each directory level are sorted. + """ + entries = scandir(path) + yield from entries + for entry in entries: + if entry.is_dir() and recurse(entry): + yield from visit(entry.path, recurse) + + +def absolutepath(path: Union[Path, str]) -> Path: + """Convert a path to an absolute path using os.path.abspath. + + Prefer this over Path.resolve() (see #6523). + Prefer this over Path.absolute() (not public, doesn't normalize). 
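+
+    Example (the working directory here is hypothetical)::
+
+        # with os.getcwd() == "/home/user/proj"
+        absolutepath("src/../tests")  # -> Path("/home/user/proj/tests")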
+    """
+    return Path(os.path.abspath(str(path)))
+
+
+def commonpath(path1: Path, path2: Path) -> Optional[Path]:
+    """Return the common part shared with the other path, or None if there is
+    no common part.
+
+    If one path is relative and one is absolute, returns None.
+    """
+    try:
+        return Path(os.path.commonpath((str(path1), str(path2))))
+    except ValueError:
+        return None
+
+
+def bestrelpath(directory: Path, dest: Path) -> str:
+    """Return a string which is a relative path from directory to dest such
+    that directory/bestrelpath == dest.
+
+    The paths must be either both absolute or both relative.
+
+    If no such path can be determined, returns dest.
+    """
+    assert isinstance(directory, Path)
+    assert isinstance(dest, Path)
+    if dest == directory:
+        return os.curdir
+    # Find the longest common directory.
+    base = commonpath(directory, dest)
+    # Can be the case on Windows for two absolute paths on different drives.
+    # Can be the case for two relative paths without common prefix.
+    # Can be the case for a relative path and an absolute path.
+    if not base:
+        return str(dest)
+    reldirectory = directory.relative_to(base)
+    reldest = dest.relative_to(base)
+    return os.path.join(
+        # Back from directory to base.
+        *([os.pardir] * len(reldirectory.parts)),
+        # Forward from base to dest.
+        *reldest.parts,
+    )
+
+
+# Originates from py.path.local.copy(), with significant trims and adjustments.
+# TODO(py38): Replace with shutil.copytree(..., symlinks=True, dirs_exist_ok=True)
+def copytree(source: Path, target: Path) -> None:
+    """Recursively copy a source directory to target."""
+    assert source.is_dir()
+    for entry in visit(source, recurse=lambda entry: not entry.is_symlink()):
+        x = Path(entry)
+        relpath = x.relative_to(source)
+        newx = target / relpath
+        newx.parent.mkdir(exist_ok=True)
+        if x.is_symlink():
+            newx.symlink_to(os.readlink(x))
+        elif x.is_file():
+            shutil.copyfile(x, newx)
+        elif x.is_dir():
+            newx.mkdir(exist_ok=True)
+
+
+def safe_exists(p: Path) -> bool:
+    """Like Path.exists(), but account for input arguments that might be too long (#11394)."""
+    try:
+        return p.exists()
+    except (ValueError, OSError):
+        # ValueError: stat: path too long for Windows
+        # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect
+        return False
diff --git a/venv/lib/python3.12/site-packages/_pytest/py.typed b/venv/lib/python3.12/site-packages/_pytest/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/venv/lib/python3.12/site-packages/_pytest/pytester.py b/venv/lib/python3.12/site-packages/_pytest/pytester.py
new file mode 100644
index 0000000000..0771065e06
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/pytester.py
@@ -0,0 +1,1789 @@
+"""(Disabled by default) support for testing pytest and pytest plugins.
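+
+To enable it, a plugin's own test suite typically lists it in a ``conftest.py``
+(an illustrative sketch)::
+
+    pytest_plugins = "pytester"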
+ +PYTEST_DONT_REWRITE +""" +import collections.abc +import contextlib +import gc +import importlib +import locale +import os +import platform +import re +import shutil +import subprocess +import sys +import traceback +from fnmatch import fnmatch +from io import StringIO +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import IO +from typing import Iterable +from typing import List +from typing import Optional +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import final +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import copytree +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Final + from typing_extensions import Literal + + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="Run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "Run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="Directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> List[Tuple[str, str]]: + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + encoding=locale.getpreferredencoding(False), + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not 
in line
+                and "mem" not in line
+                and "txt" not in line
+                and "cwd" not in line
+            )
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split("\0")
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename in IGNORE_PAM:
+                    continue
+                if filename.startswith("/"):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self) -> bool:
+        try:
+            subprocess.run(("lsof", "-v"), check=True)
+        except (OSError, subprocess.CalledProcessError):
+            return False
+        else:
+            return True
+
+    @hookimpl(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_protocol(self, item: Item) -> Generator[None, None, None]:
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = [
+                "***** %s FD leakage detected" % len(leaked_files),
+                *(str(f) for f in leaked_files),
+                "*** Before:",
+                *(str(f) for f in lines1),
+                "*** After:",
+                *(str(f) for f in lines2),
+                "***** %s FD leakage detected" % len(leaked_files),
+                "*** function %s:%s: %s " % item.location,
+                "See issue #2366",
+            ]
+            item.warn(PytestWarning("\n".join(error)))
+
+
+# used at least by pytest-xdist plugin
+
+
+@fixture
+def _pytest(request: FixtureRequest) -> "PytestArg":
+    """Return a helper which offers a gethookrecorder(hook) method which
+    returns a HookRecorder instance which helps to make assertions about called
+    hooks."""
+    return PytestArg(request)
+
+
+class PytestArg:
+    def __init__(self, request: FixtureRequest) -> None:
+        self._request = request
+
+    def gethookrecorder(self, hook) -> "HookRecorder":
+        hookrecorder = HookRecorder(hook._pm)
+        self._request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(values: Iterable[str]) -> List[str]:
+    """Only return names from iterator values without a leading underscore."""
+    return [x for x in values if x[0] != "_"]
+
+
+@final
+class RecordedHookCall:
+    """A recorded call to a hook.
+
+    The arguments to the hook call are set as attributes.
+    For example:
+
+    .. code-block:: python
+
+        calls = hook_recorder.getcalls("pytest_runtest_setup")
+        # Suppose pytest_runtest_setup was called once with `item=an_item`.
+        assert calls[0].item is an_item
+    """
+
+    def __init__(self, name: str, kwargs) -> None:
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self) -> str:
+        d = self.__dict__.copy()
+        del d["_name"]
+        return f"<RecordedHookCall {self._name!r}(**{d!r})>"
+
+    if TYPE_CHECKING:
+        # The class has undetermined attributes, this tells mypy about it.
+        def __getattr__(self, key: str):
+            ...
+
+
+@final
+class HookRecorder:
+    """Record all hooks called in a plugin manager.
+
+    Hook recorders are created by :class:`Pytester`.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
+ """ + + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + + self._pluginmanager = pluginmanager + self.calls: List[RecordedHookCall] = [] + self.ret: Optional[Union[int, ExitCode]] = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(RecordedHookCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: Union[str, Iterable[str]]) -> List[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[Tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + backlocals = sys._getframe(1).f_locals + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> RecordedHookCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([" %s" % x for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> RecordedHookCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, + names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... + + @overload + def getreports( + self, + names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ... + + def getreports( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: Union[str, Iterable[str]] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: Optional[str] = None, + ) -> Union[CollectReport, TestReport]: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + "could not find test report matching %r: " + "no test reports at all!" 
% (inamepart,) + ) + if len(values) > 1: + raise ValueError( + "found 2 or more testreports matching {!r}: {}".format( + inamepart, values + ) + ) + return values[0] + + @overload + def getfailures( + self, + names: "Literal['pytest_collectreport']", + ) -> Sequence[CollectReport]: + ... + + @overload + def getfailures( + self, + names: "Literal['pytest_runtest_logreport']", + ) -> Sequence[TestReport]: + ... + + @overload + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + ... + + def getfailures( + self, + names: Union[str, Iterable[str]] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[Union[CollectReport, TestReport]]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> List[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> "LineComp": + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> Type["LineMatcher"]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. + """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> "Pytester": + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. 
+    """
+    return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True)
+
+
+@fixture
+def _sys_snapshot() -> Generator[None, None, None]:
+    snappaths = SysPathsSnapshot()
+    snapmods = SysModulesSnapshot()
+    yield
+    snapmods.restore()
+    snappaths.restore()
+
+
+@fixture
+def _config_for_test() -> Generator[Config, None, None]:
+    from _pytest.config import get_config
+
+    config = get_config()
+    yield config
+    config._ensure_unconfigure()  # cleanup, e.g. capman closing tmpfiles.
+
+
+# Regex to match the session duration string in the summary: "74.34s".
+rex_session_duration = re.compile(r"\d+\.\d\ds")
+# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
+rex_outcome = re.compile(r"(\d+) (\w+)")
+
+
+@final
+class RunResult:
+    """The result of running a command from :class:`~pytest.Pytester`."""
+
+    def __init__(
+        self,
+        ret: Union[int, ExitCode],
+        outlines: List[str],
+        errlines: List[str],
+        duration: float,
+    ) -> None:
+        try:
+            self.ret: Union[int, ExitCode] = ExitCode(ret)
+            """The return value."""
+        except ValueError:
+            self.ret = ret
+        self.outlines = outlines
+        """List of lines captured from stdout."""
+        self.errlines = errlines
+        """List of lines captured from stderr."""
+        self.stdout = LineMatcher(outlines)
+        """:class:`~pytest.LineMatcher` of stdout.
+
+        Use e.g. :func:`str(stdout) <str>` to reconstruct stdout, or the commonly used
+        :func:`stdout.fnmatch_lines() <pytest.LineMatcher.fnmatch_lines>` method.
+        """
+        self.stderr = LineMatcher(errlines)
+        """:class:`~pytest.LineMatcher` of stderr."""
+        self.duration = duration
+        """Duration in seconds."""
+
+    def __repr__(self) -> str:
+        return (
+            "<RunResult ret=%s len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
+            % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
+        )
+
+    def parseoutcomes(self) -> Dict[str, int]:
+        """Return a dictionary of outcome noun -> count from parsing the terminal
+        output that the test process produced.
+
+        The returned nouns will always be in plural form::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        return self.parse_summary_nouns(self.outlines)
+
+    @classmethod
+    def parse_summary_nouns(cls, lines) -> Dict[str, int]:
+        """Extract the nouns from a pytest terminal summary line.
+
+        It always returns the plural noun for consistency::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        for line in reversed(lines):
+            if rex_session_duration.search(line):
+                outcomes = rex_outcome.findall(line)
+                ret = {noun: int(count) for (count, noun) in outcomes}
+                break
+        else:
+            raise ValueError("Pytest terminal summary report not found")
+
+        to_plural = {
+            "warning": "warnings",
+            "error": "errors",
+        }
+        return {to_plural.get(k, k): v for k, v in ret.items()}
+
+    def assert_outcomes(
+        self,
+        passed: int = 0,
+        skipped: int = 0,
+        failed: int = 0,
+        errors: int = 0,
+        xpassed: int = 0,
+        xfailed: int = 0,
+        warnings: Optional[int] = None,
+        deselected: Optional[int] = None,
+    ) -> None:
+        """
+        Assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run.
+
+        ``warnings`` and ``deselected`` are only checked if not None.
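+
+        For example (illustrative)::
+
+            result.assert_outcomes(passed=1)              # any warning count accepted
+            result.assert_outcomes(passed=1, warnings=0)  # warnings must not occur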
+        """
+        __tracebackhide__ = True
+        from _pytest.pytester_assertions import assert_outcomes
+
+        outcomes = self.parseoutcomes()
+        assert_outcomes(
+            outcomes,
+            passed=passed,
+            skipped=skipped,
+            failed=failed,
+            errors=errors,
+            xpassed=xpassed,
+            xfailed=xfailed,
+            warnings=warnings,
+            deselected=deselected,
+        )
+
+
+class CwdSnapshot:
+    def __init__(self) -> None:
+        self.__saved = os.getcwd()
+
+    def restore(self) -> None:
+        os.chdir(self.__saved)
+
+
+class SysModulesSnapshot:
+    def __init__(self, preserve: Optional[Callable[[str], bool]] = None) -> None:
+        self.__preserve = preserve
+        self.__saved = dict(sys.modules)
+
+    def restore(self) -> None:
+        if self.__preserve:
+            self.__saved.update(
+                (k, m) for k, m in sys.modules.items() if self.__preserve(k)
+            )
+        sys.modules.clear()
+        sys.modules.update(self.__saved)
+
+
+class SysPathsSnapshot:
+    def __init__(self) -> None:
+        self.__saved = list(sys.path), list(sys.meta_path)
+
+    def restore(self) -> None:
+        sys.path[:], sys.meta_path[:] = self.__saved
+
+
+@final
+class Pytester:
+    """
+    Facilities to write tests/configuration files, execute pytest in isolation, and match
+    against expected output, perfect for black-box testing of pytest plugins.
+
+    It attempts to isolate the test run from external factors as much as possible, modifying
+    the current working directory to :attr:`path` and environment variables during initialization.
+    """
+
+    __test__ = False
+
+    CLOSE_STDIN: "Final" = NOTSET
+
+    class TimeoutExpired(Exception):
+        pass
+
+    def __init__(
+        self,
+        request: FixtureRequest,
+        tmp_path_factory: TempPathFactory,
+        monkeypatch: MonkeyPatch,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._request = request
+        self._mod_collections: WeakKeyDictionary[
+            Collector, List[Union[Item, Collector]]
+        ] = WeakKeyDictionary()
+        if request.function:
+            name: str = request.function.__name__
+        else:
+            name = request.node.name
+        self._name = name
+        self._path: Path = tmp_path_factory.mktemp(name, numbered=True)
+        #: A list of plugins to use with :py:meth:`parseconfig` and
+        #: :py:meth:`runpytest`. Initially this is an empty list but plugins can
+        #: be added to the list. The type of items to add to the list depends on
+        #: the method using them so refer to them for details.
+        self.plugins: List[Union[str, _PluggyPlugin]] = []
+        self._cwd_snapshot = CwdSnapshot()
+        self._sys_path_snapshot = SysPathsSnapshot()
+        self._sys_modules_snapshot = self.__take_sys_modules_snapshot()
+        self.chdir()
+        self._request.addfinalizer(self._finalize)
+        self._method = self._request.config.getoption("--runpytest")
+        self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True)
+
+        self._monkeypatch = mp = monkeypatch
+        mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot))
+        # Ensure no unexpected caching via tox.
+        mp.delenv("TOX_ENV_DIR", raising=False)
+        # Discard outer pytest options.
+        mp.delenv("PYTEST_ADDOPTS", raising=False)
+        # Ensure no user config is used.
+        tmphome = str(self.path)
+        mp.setenv("HOME", tmphome)
+        mp.setenv("USERPROFILE", tmphome)
+        # Do not use colors for inner runs by default.
+        mp.setenv("PY_COLORS", "0")
+
+    @property
+    def path(self) -> Path:
+        """Temporary directory path used to create files/run tests from, etc."""
+        return self._path
+
+    def __repr__(self) -> str:
+        return f"<Pytester {self._path!r}>"
+
+    def _finalize(self) -> None:
+        """
+        Clean up global state artifacts.
+
+        Some methods modify the global interpreter state and this tries to
+        clean this up. It does not remove the temporary directory however so
+        it can be looked at after the test run has finished.
+        """
+        self._sys_modules_snapshot.restore()
+        self._sys_path_snapshot.restore()
+        self._cwd_snapshot.restore()
+
+    def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
+        # Some zope modules used by twisted-related tests keep internal state
+        # and can't be deleted; we had some trouble in the past with
+        # `zope.interface` for example.
+        #
+        # Preserve readline due to https://bugs.python.org/issue41033.
+        # pexpect issues a SIGWINCH.
+        def preserve_module(name):
+            return name.startswith(("zope", "readline"))
+
+        return SysModulesSnapshot(preserve=preserve_module)
+
+    def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder:
+        """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`."""
+        pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True)  # type: ignore[attr-defined]
+        self._request.addfinalizer(reprec.finish_recording)
+        return reprec
+
+    def chdir(self) -> None:
+        """Cd into the temporary directory.
+
+        This is done automatically upon instantiation.
+        """
+        os.chdir(self.path)
+
+    def _makefile(
+        self,
+        ext: str,
+        lines: Sequence[Union[Any, bytes]],
+        files: Dict[str, str],
+        encoding: str = "utf-8",
+    ) -> Path:
+        items = list(files.items())
+
+        if ext and not ext.startswith("."):
+            raise ValueError(
+                f"pytester.makefile expects a file extension, try .{ext} instead of {ext}"
+            )
+
+        def to_text(s: Union[Any, bytes]) -> str:
+            return s.decode(encoding) if isinstance(s, bytes) else str(s)
+
+        if lines:
+            source = "\n".join(to_text(x) for x in lines)
+            basename = self._name
+            items.insert(0, (basename, source))
+
+        ret = None
+        for basename, value in items:
+            p = self.path.joinpath(basename).with_suffix(ext)
+            p.parent.mkdir(parents=True, exist_ok=True)
+            source_ = Source(value)
+            source = "\n".join(to_text(line) for line in source_.lines)
+            p.write_text(source.strip(), encoding=encoding)
+            if ret is None:
+                ret = p
+        assert ret is not None
+        return ret
+
+    def makefile(self, ext: str, *args: str, **kwargs: str) -> Path:
+        r"""Create new text file(s) in the test directory.
+
+        :param ext:
+            The extension the file(s) should use, including the dot, e.g. `.py`.
+        :param args:
+            All args are treated as strings and joined using newlines.
+            The result is written as contents to the file. The name of the
+            file is based on the test function requesting this fixture.
+        :param kwargs:
+            Each keyword is the name of a file, while the value of it will
+            be written as contents of the file.
+        :returns:
+            The first created file.
+
+        Examples:
+
+        .. code-block:: python
+
+            pytester.makefile(".txt", "line1", "line2")
+
+            pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")
+
+        To create binary files, use :meth:`pathlib.Path.write_bytes` directly:
+
+        .. code-block:: python
+
+            filename = pytester.path.joinpath("foo.bin")
+            filename.write_bytes(b"...")
+        """
+        return self._makefile(ext, args, kwargs)
+
+    def makeconftest(self, source: str) -> Path:
+        """Write a conftest.py file.
+
+        :param source: The contents.
+        :returns: The conftest.py file.
+        """
+        return self.makepyfile(conftest=source)
+
+    def makeini(self, source: str) -> Path:
+        """Write a tox.ini file.
+
+        :param source: The contents.
+        :returns: The tox.ini file.
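+
+        Example (illustrative contents)::
+
+            pytester.makeini('''
+                [pytest]
+                addopts = -rs
+            ''')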
+        """
+        return self.makefile(".ini", tox=source)
+
+    def getinicfg(self, source: str) -> SectionWrapper:
+        """Return the pytest section from the tox.ini config file."""
+        p = self.makeini(source)
+        return IniConfig(str(p))["pytest"]
+
+    def makepyprojecttoml(self, source: str) -> Path:
+        """Write a pyproject.toml file.
+
+        :param source: The contents.
+        :returns: The pyproject.toml file.
+
+        .. versionadded:: 6.0
+        """
+        return self.makefile(".toml", pyproject=source)
+
+    def makepyfile(self, *args, **kwargs) -> Path:
+        r"""Shortcut for .makefile() with a .py extension.
+
+        Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting
+        existing files.
+
+        Examples:
+
+        .. code-block:: python
+
+            def test_something(pytester):
+                # Initial file is created test_something.py.
+                pytester.makepyfile("foobar")
+                # To create multiple files, pass kwargs accordingly.
+                pytester.makepyfile(custom="foobar")
+                # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
+
+        """
+        return self._makefile(".py", args, kwargs)
+
+    def maketxtfile(self, *args, **kwargs) -> Path:
+        r"""Shortcut for .makefile() with a .txt extension.
+
+        Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting
+        existing files.
+
+        Examples:
+
+        .. code-block:: python
+
+            def test_something(pytester):
+                # Initial file is created test_something.txt.
+                pytester.maketxtfile("foobar")
+                # To create multiple files, pass kwargs accordingly.
+                pytester.maketxtfile(custom="foobar")
+                # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
+
+        """
+        return self._makefile(".txt", args, kwargs)
+
+    def syspathinsert(
+        self, path: Optional[Union[str, "os.PathLike[str]"]] = None
+    ) -> None:
+        """Prepend a directory to sys.path, defaults to :attr:`path`.
+
+        This is undone automatically when this object dies at the end of each
+        test.
+
+        :param path:
+            The path.
+        """
+        if path is None:
+            path = self.path
+
+        self._monkeypatch.syspath_prepend(str(path))
+
+    def mkdir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
+        """Create a new (sub)directory.
+
+        :param name:
+            The name of the directory, relative to the pytester path.
+        :returns:
+            The created directory.
+        """
+        p = self.path / name
+        p.mkdir()
+        return p
+
+    def mkpydir(self, name: Union[str, "os.PathLike[str]"]) -> Path:
+        """Create a new python package.
+
+        This creates a (sub)directory with an empty ``__init__.py`` file so it
+        gets recognised as a Python package.
+        """
+        p = self.path / name
+        p.mkdir()
+        p.joinpath("__init__.py").touch()
+        return p
+
+    def copy_example(self, name: Optional[str] = None) -> Path:
+        """Copy file from project's directory into the testdir.
+
+        :param name:
+            The name of the file to copy.
+        :return:
+            Path to the copied directory (inside ``self.path``).
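+
+        Example (illustrative; assumes ``pytester_example_dir`` is set in the ini file)::
+
+            @pytest.mark.pytester_example_path("examples")
+            def test_example(pytester):
+                pytester.copy_example("test_basic.py")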
+ """ + example_dir_ = self._request.config.getini("pytester_example_dir") + if example_dir_ is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir: Path = self._request.config.rootpath / example_dir_ + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + copytree(example_path, self.path) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode( + self, config: Config, arg: Union[str, "os.PathLike[str]"] + ) -> Union[Collector, Item]: + """Get the collection node of a file. + + :param config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param arg: + Path to the file. + :returns: + The node. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode( + self, path: Union[str, "os.PathLike[str]"] + ) -> Union[Collector, Item]: + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: + Path to the file. + :returns: + The node. + """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Union[Item, Collector]]) -> List[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + + :param colitems: + The collection nodes. + :returns: + The collected items. + """ + session = colitems[0].session + result: List[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. 
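+
+        A sketch of such a calling class (illustrative)::
+
+            class TestMyRunner:
+                def getrunner(self):
+                    from _pytest.runner import runtestprotocol
+                    return runtestprotocol
+
+                def test_it(self, pytester):
+                    reports = pytester.runitem("def test_func(): assert True")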
+ """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]: + """Run ``pytest.main(['--collect-only'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: Union[str, "os.PathLike[str]"], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect: + def pytest_configure(x, config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins.append(Collect()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. 
+ if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + now = timing.time() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now + ) + res.reprec = reprec # type: ignore + return res + + def runpytest( + self, *args: Union[str, "os.PathLike[str]"], **kwargs: Any + ) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[Union[str, "os.PathLike[str]"]] + ) -> List[Union[str, "os.PathLike[str]"]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp")) + return new_args + + def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a new pytest :class:`pytest.Config` instance from given + commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create a + new :py:class:`pytest.PytestPluginManager` and call the + :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` + instance. + + If :attr:`plugins` has been populated they should be plugin modules + to be registered with the plugin manager. + """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: Union[str, "os.PathLike[str]"]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` + hook. 
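+
+        Example (illustrative)::
+
+            config = pytester.parseconfigure("-v", "--tb=short")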
+ """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: Union[str, "os.PathLike[str]"], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + :returns: + The test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, "{!r} item not found in module:\n{}\nitems: {}".format( + funcname, source, items + ) + + def getitems(self, source: Union[str, "os.PathLike[str]"]) -> List[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: Union[str, "os.PathLike[str]"], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name( + self, modcol: Collector, name: str + ) -> Optional[Union[Item, Collector]]: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[Union[str, "os.PathLike[str]"]], + stdout: Union[int, TextIO] = subprocess.PIPE, + stderr: Union[int, TextIO] = subprocess.PIPE, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. 
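+
+        Example (illustrative)::
+
+            proc = pytester.popen([sys.executable, "-c", "print('hi')"])
+            out, err = proc.communicate()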
+ """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: Union[str, "os.PathLike[str]"], + timeout: Optional[float] = None, + stdin: Union[NotSetType, bytes, IO[Any], int] = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is :py:attr:`CLOSE_STDIN` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. + :returns: + The result. + """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + now = timing.time() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = ( + "{seconds} second timeout expired running:" + " {command}".format(seconds=timeout, command=cmdargs) + ) + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, timing.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> Tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: "os.PathLike[str]") -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + 
def runpytest_subprocess( + self, *args: Union[str, "os.PathLike[str]"], timeout: Optional[float] = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :returns: + The result. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = ("--basetemp=%s" % p,) + args + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0]) + args + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest( + self, string: str, expect_timeout: float = 10.0 + ) -> "pexpect.spawn": + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn": + """Run a command using pexpect. + + The pexpect child is returned. + """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines `. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: List[str]) -> None: + self.lines = lines + self._log_output: List[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. versionadded:: 6.2 + You can use :meth:`str` in older versions. 
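+
+        Example (illustrative)::
+
+            text = str(result.stdout)  # whole captured stdout as one string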
+ """ + return "\n".join(self.lines) + + def _getlines(self, lines2: Union[str, Sequence[str], Source]) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = "line %r not found in output" % line + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError("line %r not found in output" % fnline) + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. + :param consecutive: Match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive) + + def re_match_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:re.match`). + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also shown as part of the error message. + + :param lines2: string patterns to match. + :param consecutive: match lines consecutively? + """ + __tracebackhide__ = True + self._match_lines( + lines2, + lambda name, pat: bool(re.match(pat, name)), + "re.match", + consecutive=consecutive, + ) + + def _match_lines( + self, + lines2: Sequence[str], + match_func: Callable[[str, str], bool], + match_nickname: str, + *, + consecutive: bool = False, + ) -> None: + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param Sequence[str] lines2: + List of string patterns to match. The actual format depends on + ``match_func``. + :param match_func: + A callable ``match_func(line, pattern)`` where line is the + captured line from stdout/stderr and pattern is the matching + pattern. + :param str match_nickname: + The nickname for the match function that will be logged to stdout + when a match occurs. + :param consecutive: + Match lines consecutively? 
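+
+        For example (an illustrative sketch of the delegation)::
+
+            matcher = LineMatcher(["collecting", "1 passed"])
+            matcher._match_lines(["*passed*"], fnmatch, "fnmatch", consecutive=False)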
+        """
+        if not isinstance(lines2, collections.abc.Sequence):
+            raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
+        lines2 = self._getlines(lines2)
+        lines1 = self.lines[:]
+        extralines = []
+        __tracebackhide__ = True
+        wnick = len(match_nickname) + 1
+        started = False
+        for line in lines2:
+            nomatchprinted = False
+            while lines1:
+                nextline = lines1.pop(0)
+                if line == nextline:
+                    self._log("exact match:", repr(line))
+                    started = True
+                    break
+                elif match_func(nextline, line):
+                    self._log("%s:" % match_nickname, repr(line))
+                    self._log(
+                        "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                    )
+                    started = True
+                    break
+                else:
+                    if consecutive and started:
+                        msg = f"no consecutive match: {line!r}"
+                        self._log(msg)
+                        self._log(
+                            "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                        )
+                        self._fail(msg)
+                    if not nomatchprinted:
+                        self._log(
+                            "{:>{width}}".format("nomatch:", width=wnick), repr(line)
+                        )
+                        nomatchprinted = True
+                    self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
+                    extralines.append(nextline)
+            else:
+                msg = f"remains unmatched: {line!r}"
+                self._log(msg)
+                self._fail(msg)
+        self._log_output = []
+
+    def no_fnmatch_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
+
+        :param str pat: The pattern to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(pat, fnmatch, "fnmatch")
+
+    def no_re_match_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``re.match``.
+
+        :param str pat: The regular expression to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(
+            pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
+        )
+
+    def _no_match_line(
+        self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
+    ) -> None:
+        """Ensure captured lines do not match the given pattern, using ``match_func``.
+
+        :param str pat: The pattern to match lines.
+        """
+        __tracebackhide__ = True
+        nomatch_printed = False
+        wnick = len(match_nickname) + 1
+        for line in self.lines:
+            if match_func(line, pat):
+                msg = f"{match_nickname}: {pat!r}"
+                self._log(msg)
+                self._log("{:>{width}}".format("with:", width=wnick), repr(line))
+                self._fail(msg)
+            else:
+                if not nomatch_printed:
+                    self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat))
+                    nomatch_printed = True
+                self._log("{:>{width}}".format("and:", width=wnick), repr(line))
+        self._log_output = []
+
+    def _fail(self, msg: str) -> None:
+        __tracebackhide__ = True
+        log_text = self._log_text
+        self._log_output = []
+        fail(log_text)
+
+    def str(self) -> str:
+        """Return the entire original text."""
+        return str(self)
diff --git a/venv/lib/python3.12/site-packages/_pytest/pytester_assertions.py b/venv/lib/python3.12/site-packages/_pytest/pytester_assertions.py
new file mode 100644
index 0000000000..657e4db5fc
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/_pytest/pytester_assertions.py
@@ -0,0 +1,75 @@
+"""Helper plugin for pytester; should not be loaded on its own."""
+# This plugin contains assertions used by pytester. pytester cannot
+# contain them itself, since it is imported by the `pytest` module,
+# hence cannot be subject to assertion rewriting, which requires a
+# module to not be already imported.
+from typing import Dict +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: Tuple[ + Sequence[TestReport], + Sequence[Union[CollectReport, TestReport]], + Sequence[Union[CollectReport, TestReport]], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: Dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: Optional[int] = None, + deselected: Optional[int] = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/venv/lib/python3.12/site-packages/_pytest/python.py b/venv/lib/python3.12/site-packages/_pytest/python.py new file mode 100644 index 0000000000..5f8be5d9b9 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/python.py @@ -0,0 +1,1843 @@ +"""Python test discovery, setup and run of test functions.""" +import dataclasses +import enum +import fnmatch +import inspect +import itertools +import os +import sys +import types +import warnings +from collections import Counter +from collections import defaultdict +from functools import partial +from pathlib import Path +from typing import Any +from typing import Callable +from typing import Dict +from typing import Generator +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._io import TerminalWriter +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import assert_never +from _pytest.compat import final +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_async_function 
+from _pytest.compat import is_generator +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.compat import STRING_TYPES +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import INSTANCE_COLLECTOR +from _pytest.deprecated import NOSE_SUPPORT_METHOD +from _pytest.fixtures import FuncFixtureInfo +from _pytest.main import Session +from _pytest.mark import MARK_GEN +from _pytest.mark import ParameterSet +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import parts +from _pytest.pathlib import visit +from _pytest.scope import Scope +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning +from _pytest.warning_types import PytestUnhandledCoroutineWarning + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.scope import _ScopeName + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="Show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="Show fixtures per test", + ) + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. + default=["test_*.py", "*_test.py"], + help="Glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="Prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="Prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="Disable string escape non-ASCII characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def pytest_generate_tests(metafunc: "Metafunc") -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. 
argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_warn_and_skip(nodeid: str) -> None: + msg = "async def functions are not natively supported and have been skipped.\n" + msg += ( + "You need to install a suitable plugin for your async framework, for example:\n" + ) + msg += " - anyio\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-tornasync\n" + msg += " - pytest-trio\n" + msg += " - pytest-twisted" + warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) + skip(reason="async def function and no async plugin installed (see warnings)") + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_warn_and_skip(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_warn_and_skip(pyfuncitem.nodeid) + elif result is not None: + warnings.warn( + PytestReturnNotNoneWarning( + f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a " + "future version of pytest. Did you mean to use `assert` instead of `return`?" + ) + ) + return True + + +def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Optional["Module"]: + if file_path.suffix == ".py": + if not parent.session.isinitpath(file_path): + if not path_matches_patterns( + file_path, parent.config.getini("python_files") + ["__init__.py"] + ): + return None + ihook = parent.session.gethookproxy(file_path) + module: Module = ihook.pytest_pycollect_makemodule( + module_path=file_path, parent=parent + ) + return module + return None + + +def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool: + """Return whether path matches any of the patterns in the list of globs given.""" + return any(fnmatch_ex(pattern, path) for pattern in patterns) + + +def pytest_pycollect_makemodule(module_path: Path, parent) -> "Module": + if module_path.name == "__init__.py": + pkg: Package = Package.from_parent(parent, path=module_path) + return pkg + mod: Module = Module.from_parent(parent, path=module_path) + return mod + + +@hookimpl(trylast=True) +def pytest_pycollect_makeitem( + collector: Union["Module", "Class"], name: str, obj: object +) -> Union[None, nodes.Item, nodes.Collector, List[Union[nodes.Item, nodes.Collector]]]: + assert isinstance(collector, (Class, Module)), type(collector) + # Nothing was collected elsewhere, let's do it here. + if safe_isclass(obj): + if collector.istestclass(obj, name): + klass: Class = Class.from_parent(collector, name=name, obj=obj) + return klass + elif collector.istestfunction(obj, name): + # mock seems to store unbound methods (issue473), normalize it. 
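+        # Illustrative example of the normalization below: for a function
+        # wrapped by mock.patch the collected attribute may arrive as a
+        # method object whose plain function lives in `.__func__`; objects
+        # without a `__func__` attribute are returned unchanged.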
+ obj = getattr(obj, "__func__", obj) + # We need to try and unwrap the function if it's a functools.partial + # or a functools.wrapped. + # We mustn't if it's been wrapped with mock.patch (python 2 only). + if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))): + filename, lineno = getfslineno(obj) + warnings.warn_explicit( + message=PytestCollectionWarning( + "cannot collect %r because it is not a function." % name + ), + category=None, + filename=str(filename), + lineno=lineno + 1, + ) + elif getattr(obj, "__test__", True): + if is_generator(obj): + res: Function = Function.from_parent(collector, name=name) + reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format( + name=name + ) + res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) + res.warn(PytestCollectionWarning(reason)) + return res + else: + return list(collector._genfunctions(name, obj)) + return None + + +class PyobjMixin(nodes.Node): + """this mix-in inherits from Node to carry over the typing information + + as its intended to always mix in before a node + its position in the mro is unaffected""" + + _ALLOW_MARKERS = True + + @property + def module(self): + """Python module object this node was collected from (can be None).""" + node = self.getparent(Module) + return node.obj if node is not None else None + + @property + def cls(self): + """Python class object this node was collected from (can be None).""" + node = self.getparent(Class) + return node.obj if node is not None else None + + @property + def instance(self): + """Python instance object the function is bound to. + + Returns None if not a test method, e.g. for a standalone test function, + a staticmethod, a class or a module. + """ + node = self.getparent(Function) + return getattr(node.obj, "__self__", None) if node is not None else None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + chain = self.listchain() + chain.reverse() + parts = [] + for node in chain: + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]: + # XXX caching? 
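+        # Illustrative shape of the result (hypothetical test): roughly
+        # ("tests/test_foo.py", <line number>, "TestFoo.test_bar") -- a
+        # (path, line, dotted name) triple used for report headers.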
+ obj = self.obj + compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) + if isinstance(compat_co_firstlineno, int): + # nose compatibility + file_path = sys.modules[obj.__module__].__file__ + assert file_path is not None + if file_path.endswith(".pyc"): + file_path = file_path[:-1] + path: Union["os.PathLike[str]", str] = file_path + lineno = compat_co_firstlineno + else: + path, lineno = getfslineno(obj) + modpath = self.getmodpath() + assert isinstance(lineno, int) + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( # noqa: E305 + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, (staticmethod, classmethod)): + # staticmethods and classmethods need to be unwrapped. + obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + return self.classnamefilter(name) or self.isnosetest(obj) + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: Set[str] = set() + dict_values: List[List[Union[nodes.Item, nodes.Collector]]] = [] + ihook = self.ihook + for dic in dicts: + values: List[Union[nodes.Item, nodes.Collector]] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. 
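+            # (mutating a dict while iterating it raises "RuntimeError:
+            # dictionary changed size during iteration"; the list() call
+            # snapshots the items first).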
+            for name, obj in list(dic.items()):
+                if name in IGNORED_ATTRIBUTES:
+                    continue
+                if name in seen:
+                    continue
+                seen.add(name)
+                res = ihook.pytest_pycollect_makeitem(
+                    collector=self, name=name, obj=obj
+                )
+                if res is None:
+                    continue
+                elif isinstance(res, list):
+                    values.extend(res)
+                else:
+                    values.append(res)
+            dict_values.append(values)
+
+        # Between classes in the class hierarchy, reverse-MRO order -- nodes
+        # inherited from base classes should come before subclasses.
+        result = []
+        for values in reversed(dict_values):
+            result.extend(values)
+        return result
+
+    def _genfunctions(self, name: str, funcobj) -> Iterator["Function"]:
+        modulecol = self.getparent(Module)
+        assert modulecol is not None
+        module = modulecol.obj
+        clscol = self.getparent(Class)
+        cls = clscol and clscol.obj or None
+
+        definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
+        fixtureinfo = definition._fixtureinfo
+
+        # pytest_generate_tests impls call metafunc.parametrize() which fills
+        # metafunc._calls, the outcome of the hook.
+        metafunc = Metafunc(
+            definition=definition,
+            fixtureinfo=fixtureinfo,
+            config=self.config,
+            cls=cls,
+            module=module,
+            _ispytest=True,
+        )
+        methods = []
+        if hasattr(module, "pytest_generate_tests"):
+            methods.append(module.pytest_generate_tests)
+        if cls is not None and hasattr(cls, "pytest_generate_tests"):
+            methods.append(cls().pytest_generate_tests)
+        self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
+
+        if not metafunc._calls:
+            yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
+        else:
+            # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
+            fm = self.session._fixturemanager
+            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+
+            # add_funcarg_pseudo_fixture_def() may have shadowed some fixtures
+            # with direct parametrization, so make sure we update what the
+            # function really needs.
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = f"{name}[{callspec.id}]"
+                yield Function.from_parent(
+                    self,
+                    name=subname,
+                    callspec=callspec,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+class Module(nodes.File, PyCollector):
+    """Collector for test classes and functions in a Python module."""
+
+    def _getobj(self):
+        return self._importtestmodule()
+
+    def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
+        self._inject_setup_module_fixture()
+        self._inject_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super().collect()
+
+    def _inject_setup_module_fixture(self) -> None:
+        """Inject a hidden autouse, module scoped fixture into the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        has_nose = self.config.pluginmanager.has_plugin("nose")
+        setup_module = _get_first_non_fixture_func(
+            self.obj, ("setUpModule", "setup_module")
+        )
+        if setup_module is None and has_nose:
+            # The name "setup" is too common - only treat as fixture if callable.
+            setup_module = _get_first_non_fixture_func(self.obj, ("setup",))
+            if not callable(setup_module):
+                setup_module = None
+        teardown_module = _get_first_non_fixture_func(
+            self.obj, ("tearDownModule", "teardown_module")
+        )
+        if teardown_module is None and has_nose:
+            teardown_module = _get_first_non_fixture_func(self.obj, ("teardown",))
+            # Same as "setup" above - only treat as fixture if callable.
+            if not callable(teardown_module):
+                teardown_module = None
+
+        if setup_module is None and teardown_module is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="module",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_module_fixture_{self.obj.__name__}",
+        )
+        def xunit_setup_module_fixture(request) -> Generator[None, None, None]:
+            if setup_module is not None:
+                _call_with_optional_argument(setup_module, request.module)
+            yield
+            if teardown_module is not None:
+                _call_with_optional_argument(teardown_module, request.module)
+
+        self.obj.__pytest_setup_module = xunit_setup_module_fixture
+
+    def _inject_setup_function_fixture(self) -> None:
+        """Inject a hidden autouse, function scoped fixture into the collected module object
+        that invokes setup_function/teardown_function if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
+        teardown_function = _get_first_non_fixture_func(
+            self.obj, ("teardown_function",)
+        )
+        if setup_function is None and teardown_function is None:
+            return
+
+        @fixtures.fixture(
+            autouse=True,
+            scope="function",
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_function_fixture_{self.obj.__name__}",
+        )
+        def xunit_setup_function_fixture(request) -> Generator[None, None, None]:
+            if request.instance is not None:
+                # In this case we are bound to an instance, so we need to let
+                # setup_method handle this.
+                yield
+                return
+            if setup_function is not None:
+                _call_with_optional_argument(setup_function, request.function)
+            yield
+            if teardown_function is not None:
+                _call_with_optional_argument(teardown_function, request.function)
+
+        self.obj.__pytest_setup_function = xunit_setup_function_fixture
+
+    def _importtestmodule(self):
+        # We assume we are only called once per module.
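+        # --import-mode is one of "prepend", "append" or "importlib"; the
+        # common failure modes of import_path() below are converted to a
+        # CollectError so the user sees a collection error rather than an
+        # internal traceback.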
+ importmode = self.config.getoption("--import-mode") + try: + mod = import_path(self.path, mode=importmode, root=self.config.rootpath) + except SyntaxError as e: + raise self.CollectError( + ExceptionInfo.from_current().getrepr(style="short") + ) from e + except ImportPathMismatchError as e: + raise self.CollectError( + "import file mismatch:\n" + "imported module %r has this __file__ attribute:\n" + " %s\n" + "which is not the same as the test file we want to collect:\n" + " %s\n" + "HINT: remove __pycache__ / .pyc files and/or use a " + "unique basename for your test file modules" % e.args + ) from e + except ImportError as e: + exc_info = ExceptionInfo.from_current() + if self.config.getoption("verbose") < 2: + exc_info.traceback = exc_info.traceback.filter(filter_traceback) + exc_repr = ( + exc_info.getrepr(style="short") + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + raise self.CollectError( + "ImportError while importing test module '{path}'.\n" + "Hint: make sure your test modules/packages have valid Python names.\n" + "Traceback:\n" + "{traceback}".format(path=self.path, traceback=formatted_tb) + ) from e + except skip.Exception as e: + if e.allow_module_level: + raise + raise self.CollectError( + "Using pytest.skip outside of a test will skip the entire module. " + "If that's your intention, pass `allow_module_level=True`. " + "If you want to skip a specific test or an entire class, " + "use the @pytest.mark.skip or @pytest.mark.skipif decorators." + ) from e + self.config.pluginmanager.consider_module(mod) + return mod + + +class Package(Module): + """Collector for files and directories in a Python packages -- directories + with an `__init__.py` file.""" + + def __init__( + self, + fspath: Optional[LEGACY_PATH], + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + path: Optional[Path] = None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # nodes.FSCollector.__init__(self, fspath, parent=parent) + session = parent.session + nodes.FSCollector.__init__( + self, + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + ) + self.name = self.path.parent.name + + def setup(self) -> None: + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). 
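+        # Instead, the xunit-style functions are invoked directly here and
+        # the matching teardown is registered via addfinalizer() below.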
+ setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + if setup_module is not None: + _call_with_optional_argument(setup_module, self.obj) + + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + if teardown_module is not None: + func = partial(_call_with_optional_argument, teardown_module, self.obj) + self.addfinalizer(func) + + def _recurse(self, direntry: "os.DirEntry[str]") -> bool: + if direntry.name == "__pycache__": + return False + fspath = Path(direntry.path) + ihook = self.session.gethookproxy(fspath.parent) + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return False + return True + + def _collectfile( + self, fspath: Path, handle_dupes: bool = True + ) -> Sequence[nodes.Collector]: + assert ( + fspath.is_file() + ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format( + fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink() + ) + ihook = self.session.gethookproxy(fspath) + if not self.session.isinitpath(fspath): + if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config): + return () + + if handle_dupes: + keepduplicates = self.config.getoption("keepduplicates") + if not keepduplicates: + duplicate_paths = self.config.pluginmanager._duplicatepaths + if fspath in duplicate_paths: + return () + else: + duplicate_paths.add(fspath) + + return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return] + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + this_path = self.path.parent + + # Always collect the __init__ first. + if path_matches_patterns(self.path, self.config.getini("python_files")): + yield Module.from_parent(self, path=self.path) + + pkg_prefixes: Set[Path] = set() + for direntry in visit(str(this_path), recurse=self._recurse): + path = Path(direntry.path) + + # We will visit our own __init__.py file, in which case we skip it. + if direntry.is_file(): + if direntry.name == "__init__.py" and path.parent == this_path: + continue + + parts_ = parts(direntry.path) + if any( + str(pkg_prefix) in parts_ and pkg_prefix / "__init__.py" != path + for pkg_prefix in pkg_prefixes + ): + continue + + if direntry.is_file(): + yield from self._collectfile(path) + elif not direntry.is_dir(): + # Broken symlink or invalid/missing file. + continue + elif path.joinpath("__init__.py").is_file(): + pkg_prefixes.add(path) + + +def _call_with_optional_argument(func, arg) -> None: + """Call the given function with the given argument if func accepts one argument, otherwise + calls func without arguments.""" + arg_count = func.__code__.co_argcount + if inspect.ismethod(func): + arg_count -= 1 + if arg_count: + func(arg) + else: + func() + + +def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> Optional[object]: + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to avoid calling it twice. 
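+
+    Illustrative sketch: if a module defines ``setup_module`` as a plain
+    function but ``setUpModule`` as a ``@pytest.fixture``, then
+    ``_get_first_non_fixture_func(mod, ("setUpModule", "setup_module"))``
+    skips the fixture-marked name and returns the plain function.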
+ """ + for name in names: + meth: Optional[object] = getattr(obj, name, None) + if meth is not None and fixtures.getfixturemarker(meth) is None: + return meth + return None + + +class Class(PyCollector): + """Collector for test methods (and nested classes) in a Python class.""" + + @classmethod + def from_parent(cls, parent, *, name, obj=None, **kw): + """The public constructor.""" + return super().from_parent(name=name, parent=parent, **kw) + + def newinstance(self): + return self.obj() + + def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]: + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + "cannot collect test class %r because it has a " + "__init__ constructor (from: %s)" + % (self.obj.__name__, self.parent.nodeid) + ) + ) + return [] + elif hasnew(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + "cannot collect test class %r because it has a " + "__new__ constructor (from: %s)" + % (self.obj.__name__, self.parent.nodeid) + ) + ) + return [] + + self._inject_setup_class_fixture() + self._inject_setup_method_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + return super().collect() + + def _inject_setup_class_fixture(self) -> None: + """Inject a hidden autouse, class scoped fixture into the collected class object + that invokes setup_class/teardown_class if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",)) + teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",)) + if setup_class is None and teardown_class is None: + return + + @fixtures.fixture( + autouse=True, + scope="class", + # Use a unique name to speed up lookup. + name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}", + ) + def xunit_setup_class_fixture(cls) -> Generator[None, None, None]: + if setup_class is not None: + func = getimfunc(setup_class) + _call_with_optional_argument(func, self.obj) + yield + if teardown_class is not None: + func = getimfunc(teardown_class) + _call_with_optional_argument(func, self.obj) + + self.obj.__pytest_setup_class = xunit_setup_class_fixture + + def _inject_setup_method_fixture(self) -> None: + """Inject a hidden autouse, function scoped fixture into the collected class object + that invokes setup_method/teardown_method if either or both are available. + + Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with + other fixtures (#517). 
+ """ + has_nose = self.config.pluginmanager.has_plugin("nose") + setup_name = "setup_method" + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + emit_nose_setup_warning = False + if setup_method is None and has_nose: + setup_name = "setup" + emit_nose_setup_warning = True + setup_method = _get_first_non_fixture_func(self.obj, (setup_name,)) + teardown_name = "teardown_method" + teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,)) + emit_nose_teardown_warning = False + if teardown_method is None and has_nose: + teardown_name = "teardown" + emit_nose_teardown_warning = True + teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,)) + if setup_method is None and teardown_method is None: + return + + @fixtures.fixture( + autouse=True, + scope="function", + # Use a unique name to speed up lookup. + name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}", + ) + def xunit_setup_method_fixture(self, request) -> Generator[None, None, None]: + method = request.function + if setup_method is not None: + func = getattr(self, setup_name) + _call_with_optional_argument(func, method) + if emit_nose_setup_warning: + warnings.warn( + NOSE_SUPPORT_METHOD.format( + nodeid=request.node.nodeid, method="setup" + ), + stacklevel=2, + ) + yield + if teardown_method is not None: + func = getattr(self, teardown_name) + _call_with_optional_argument(func, method) + if emit_nose_teardown_warning: + warnings.warn( + NOSE_SUPPORT_METHOD.format( + nodeid=request.node.nodeid, method="teardown" + ), + stacklevel=2, + ) + + self.obj.__pytest_setup_method = xunit_setup_method_fixture + + +class InstanceDummy: + """Instance used to be a node type between Class and Function. It has been + removed in pytest 7.0. Some plugins exist which reference `pytest.Instance` + only to ignore it; this dummy class keeps them working. This will be removed + in pytest 8.""" + + +def __getattr__(name: str) -> object: + if name == "Instance": + warnings.warn(INSTANCE_COLLECTOR, 2) + return InstanceDummy + raise AttributeError(f"module {__name__} has no attribute {name}") + + +def hasinit(obj: object) -> bool: + init: object = getattr(obj, "__init__", None) + if init: + return init != object.__init__ + return False + + +def hasnew(obj: object) -> bool: + new: object = getattr(obj, "__new__", None) + if new: + return new != object.__new__ + return False + + +@final +@dataclasses.dataclass(frozen=True) +class IdMaker: + """Make IDs for a parametrization.""" + + __slots__ = ( + "argnames", + "parametersets", + "idfn", + "ids", + "config", + "nodeid", + "func_name", + ) + + # The argnames of the parametrization. + argnames: Sequence[str] + # The ParameterSets of the parametrization. + parametersets: Sequence[ParameterSet] + # Optionally, a user-provided callable to make IDs for parameters in a + # ParameterSet. + idfn: Optional[Callable[[Any], Optional[object]]] + # Optionally, explicit IDs for ParameterSets by index. + ids: Optional[Sequence[Optional[object]]] + # Optionally, the pytest config. + # Used for controlling ASCII escaping, and for calling the + # :hook:`pytest_make_parametrize_id` hook. + config: Optional[Config] + # Optionally, the ID of the node being parametrized. + # Used only for clearer error messages. + nodeid: Optional[str] + # Optionally, the ID of the function being parametrized. + # Used only for clearer error messages. 
+    func_name: Optional[str]
+
+    def make_unique_parameterset_ids(self) -> List[str]:
+        """Make a unique identifier for each ParameterSet, that may be used to
+        identify the parametrization in a node ID.
+
+        Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is
+        - user-provided id, if given
+        - else an id derived from the value, applicable for certain types
+        - else <argname><parameterset index>
+        The counter suffix is appended only in case a string wouldn't be unique
+        otherwise.
+        """
+        resolved_ids = list(self._resolve_ids())
+        # All IDs must be unique!
+        if len(resolved_ids) != len(set(resolved_ids)):
+            # Record the number of occurrences of each ID.
+            id_counts = Counter(resolved_ids)
+            # Map the ID to its next suffix.
+            id_suffixes: Dict[str, int] = defaultdict(int)
+            # Suffix non-unique IDs to make them unique.
+            for index, id in enumerate(resolved_ids):
+                if id_counts[id] > 1:
+                    resolved_ids[index] = f"{id}{id_suffixes[id]}"
+                    id_suffixes[id] += 1
+        return resolved_ids
+
+    def _resolve_ids(self) -> Iterable[str]:
+        """Resolve IDs for all ParameterSets (may contain duplicates)."""
+        for idx, parameterset in enumerate(self.parametersets):
+            if parameterset.id is not None:
+                # ID provided directly - pytest.param(..., id="...")
+                yield parameterset.id
+            elif self.ids and idx < len(self.ids) and self.ids[idx] is not None:
+                # ID provided in the IDs list - parametrize(..., ids=[...]).
+                yield self._idval_from_value_required(self.ids[idx], idx)
+            else:
+                # ID not provided - generate it.
+                yield "-".join(
+                    self._idval(val, argname, idx)
+                    for val, argname in zip(parameterset.values, self.argnames)
+                )
+
+    def _idval(self, val: object, argname: str, idx: int) -> str:
+        """Make an ID for a parameter in a ParameterSet."""
+        idval = self._idval_from_function(val, argname, idx)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_hook(val, argname)
+        if idval is not None:
+            return idval
+        idval = self._idval_from_value(val)
+        if idval is not None:
+            return idval
+        return self._idval_from_argname(argname, idx)
+
+    def _idval_from_function(
+        self, val: object, argname: str, idx: int
+    ) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet using the
+        user-provided id callable, if given."""
+        if self.idfn is None:
+            return None
+        try:
+            id = self.idfn(val)
+        except Exception as e:
+            prefix = f"{self.nodeid}: " if self.nodeid is not None else ""
+            msg = "error raised while trying to determine id of parameter '{}' at position {}"
+            msg = prefix + msg.format(argname, idx)
+            raise ValueError(msg) from e
+        if id is None:
+            return None
+        return self._idval_from_value(id)
+
+    def _idval_from_hook(self, val: object, argname: str) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet by calling the
+        :hook:`pytest_make_parametrize_id` hook."""
+        if self.config:
+            id: Optional[str] = self.config.hook.pytest_make_parametrize_id(
+                config=self.config, val=val, argname=argname
+            )
+            return id
+        return None
+
+    def _idval_from_value(self, val: object) -> Optional[str]:
+        """Try to make an ID for a parameter in a ParameterSet from its value,
+        if the value type is supported."""
+        if isinstance(val, STRING_TYPES):
+            return _ascii_escaped_by_config(val, self.config)
+        elif val is None or isinstance(val, (float, int, bool, complex)):
+            return str(val)
+        elif isinstance(val, Pattern):
+            return ascii_escaped(val.pattern)
+        elif val is NOTSET:
+            # Fallback to default. Note that NOTSET is an enum.Enum.
+ pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. + name: str = getattr(val, "__name__") + return name + return None + + def _idval_from_value_required(self, val: object, idx: int) -> str: + """Like _idval_from_value(), but fails if the type is not supported.""" + id = self._idval_from_value(val) + if id is not None: + return id + + # Fail. + if self.func_name is not None: + prefix = f"In {self.func_name}: " + elif self.nodeid is not None: + prefix = f"In {self.nodeid}: " + else: + prefix = "" + msg = ( + f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " + "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." + ) + fail(msg, pytrace=False) + + @staticmethod + def _idval_from_argname(argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet from the argument name + and the index of the ParameterSet.""" + return str(argname) + str(idx) + + +@final +@dataclasses.dataclass(frozen=True) +class CallSpec2: + """A planned parameterized invocation of a test function. + + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ + + # arg name -> arg value which will be passed to the parametrized test + # function (direct parameterization). + funcargs: Dict[str, object] = dataclasses.field(default_factory=dict) + # arg name -> arg value which will be passed to a fixture of the same name + # (indirect parametrization). + params: Dict[str, object] = dataclasses.field(default_factory=dict) + # arg name -> arg index. + indices: Dict[str, int] = dataclasses.field(default_factory=dict) + # Used for sorting parametrized resources. + _arg2scope: Dict[str, Scope] = dataclasses.field(default_factory=dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: List[str] = dataclasses.field(default_factory=list) + # Marks which will be applied to the item. + marks: List[Mark] = dataclasses.field(default_factory=list) + + def setmulti( + self, + *, + valtypes: Mapping[str, "Literal['params', 'funcargs']"], + argnames: Iterable[str], + valset: Iterable[object], + id: str, + marks: Iterable[Union[Mark, MarkDecorator]], + scope: Scope, + param_index: int, + ) -> "CallSpec2": + funcargs = self.funcargs.copy() + params = self.params.copy() + indices = self.indices.copy() + arg2scope = self._arg2scope.copy() + for arg, val in zip(argnames, valset): + if arg in params or arg in funcargs: + raise ValueError(f"duplicate parametrization of {arg!r}") + valtype_for_arg = valtypes[arg] + if valtype_for_arg == "params": + params[arg] = val + elif valtype_for_arg == "funcargs": + funcargs[arg] = val + else: + assert_never(valtype_for_arg) + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + funcargs=funcargs, + params=params, + indices=indices, + _arg2scope=arg2scope, + _idlist=[*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) + + +@final +class Metafunc: + """Objects passed to the :hook:`pytest_generate_tests` hook. 
+ + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. + """ + + def __init__( + self, + definition: "FunctionDefinition", + fixtureinfo: fixtures.FuncFixtureInfo, + config: Config, + cls=None, + module=None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. + self.definition = definition + + #: Access to the :class:`pytest.Config` object for the test session. + self.config = config + + #: The module object where the test function is defined in. + self.module = module + + #: Underlying Python test function. + self.function = definition.obj + + #: Set of fixture names required by the test function. + self.fixturenames = fixtureinfo.names_closure + + #: Class object where the test function is defined in or ``None``. + self.cls = cls + + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + # Result of parametrize(). + self._calls: List[CallSpec2] = [] + + def parametrize( + self, + argnames: Union[str, Sequence[str]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + indirect: Union[bool, Sequence[str]] = False, + ids: Optional[ + Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]] + ] = None, + scope: "Optional[_ScopeName]" = None, + *, + _param_mark: Optional[Mark] = None, + ) -> None: + """Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather than at test setup time. + + Can be called multiple times per test function (but only on different + argument names), in which case each call parametrizes all previous + parametrizations, e.g. + + :: + + unparametrized: t + parametrize ["x", "y"]: t[x], t[y] + parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2] + + :param argnames: + A comma-separated string denoting one or more argument names, or + a list/tuple of argument strings. + + :param argvalues: + The list of argvalues determines how often a test is invoked with + different argument values. + + If only one argname was specified argvalues is a list of values. + If N argnames were specified, argvalues must be a list of + N-tuples, where each tuple-element specifies a value for its + respective argname. + + :param indirect: + A list of arguments' names (subset of argnames) or a boolean. + If True the list contains all names from the argnames. Each + argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :param ids: + Sequence of (or generator for) ids for ``argvalues``, + or a callable to return part of the id for each argvalue. + + With sequences (and generators like ``itertools.count()``) the + returned ids should be of type ``string``, ``int``, ``float``, + ``bool``, or ``None``. + They are mapped to the corresponding index in ``argvalues``. + ``None`` means to use the auto-generated id. + + If it is a callable it will be called for each entry in + ``argvalues``, and the return value is used as part of the + auto-generated id for the whole set (where parts are joined with + dashes ("-")). 
+ This is useful to provide more specific ids for certain items, e.g. + dates. Returning ``None`` will use an auto-generated id. + + If no ids are provided they will be generated automatically from + the argvalues. + + :param scope: + If specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. + """ + argnames, parametersets = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + nodeid=self.definition.nodeid, + ) + del argvalues + + if "request" in argnames: + fail( + "'request' is a reserved name and cannot be used in @pytest.mark.parametrize", + pytrace=False, + ) + + if scope is not None: + scope_ = Scope.from_user( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + else: + scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + arg_values_types = self._resolve_arg_value_types(argnames, indirect) + + # Use any already (possibly) generated ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from: + generated_ids = _param_mark._param_ids_from._param_ids_generated + if generated_ids is not None: + ids = generated_ids + + ids = self._resolve_parameter_set_ids( + argnames, ids, parametersets, nodeid=self.definition.nodeid + ) + + # Store used (possibly generated) ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from and generated_ids is None: + object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + + # Create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls. + newcalls = [] + for callspec in self._calls or [CallSpec2()]: + for param_index, (param_id, param_set) in enumerate( + zip(ids, parametersets) + ): + newcallspec = callspec.setmulti( + valtypes=arg_values_types, + argnames=argnames, + valset=param_set.values, + id=param_id, + marks=param_set.marks, + scope=scope_, + param_index=param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_parameter_set_ids( + self, + argnames: Sequence[str], + ids: Optional[ + Union[Iterable[Optional[object]], Callable[[Any], Optional[object]]] + ], + parametersets: Sequence[ParameterSet], + nodeid: str, + ) -> List[str]: + """Resolve the actual ids for the given parameter sets. + + :param argnames: + Argument names passed to ``parametrize()``. + :param ids: + The `ids` parameter of the ``parametrize()`` call (see docs). + :param parametersets: + The parameter sets, each containing a set of values corresponding + to ``argnames``. + :param nodeid str: + The nodeid of the definition item that generated this + parametrization. + :returns: + List with ids for each parameter set given. 
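+
+        Illustrative sketch: ``parametrize("n", [2, 3], ids=["two", None])``
+        resolves to ``["two", "3"]`` -- explicit ids win, a ``None`` entry
+        falls back to an id derived from the value.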
+ """ + if ids is None: + idfn = None + ids_ = None + elif callable(ids): + idfn = ids + ids_ = None + else: + idfn = None + ids_ = self._validate_ids(ids, parametersets, self.function.__name__) + id_maker = IdMaker( + argnames, + parametersets, + idfn, + ids_, + self.config, + nodeid=nodeid, + func_name=self.function.__name__, + ) + return id_maker.make_unique_parameterset_ids() + + def _validate_ids( + self, + ids: Iterable[Optional[object]], + parametersets: Sequence[ParameterSet], + func_name: str, + ) -> List[Optional[object]]: + try: + num_ids = len(ids) # type: ignore[arg-type] + except TypeError: + try: + iter(ids) + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e + num_ids = len(parametersets) + + # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 + if num_ids != len(parametersets) and num_ids != 0: + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False) + + return list(itertools.islice(ids, num_ids)) + + def _resolve_arg_value_types( + self, + argnames: Sequence[str], + indirect: Union[bool, Sequence[str]], + ) -> Dict[str, "Literal['params', 'funcargs']"]: + """Resolve if each parametrized argument must be considered a + parameter to a fixture or a "funcarg" to the function, based on the + ``indirect`` parameter of the parametrized() call. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :rtype: Dict[str, str] + A dict mapping each arg name to either: + * "params" if the argname should be the parameter of a fixture of the same name. + * "funcargs" if the argname should be a parameter to the parametrized test function. + """ + if isinstance(indirect, bool): + valtypes: Dict[str, Literal["params", "funcargs"]] = dict.fromkeys( + argnames, "params" if indirect else "funcargs" + ) + elif isinstance(indirect, Sequence): + valtypes = dict.fromkeys(argnames, "funcargs") + for arg in indirect: + if arg not in argnames: + fail( + "In {}: indirect fixture '{}' doesn't exist".format( + self.function.__name__, arg + ), + pytrace=False, + ) + valtypes[arg] = "params" + else: + fail( + "In {func}: expected Sequence or boolean for indirect, got {type}".format( + type=type(indirect).__name__, func=self.function.__name__ + ), + pytrace=False, + ) + return valtypes + + def _validate_if_using_arg_names( + self, + argnames: Sequence[str], + indirect: Union[bool, Sequence[str]], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. 
+ """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + "In {}: function already takes an argument '{}' with a default value".format( + func_name, arg + ), + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: Union[bool, Sequence[str]], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[0]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +def _pretty_fixture_path(func) -> str: + cwd = Path.cwd() + loc = Path(getlocation(func, str(cwd))) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(cwd, loc) + + +def show_fixtures_per_test(config): + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + curdir = Path.cwd() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + def get_best_relpath(func) -> str: + loc = getlocation(func, str(curdir)) + return bestrelpath(curdir, Path(loc)) + + def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, fixture_doc.split("\n\n")[0] if verbose <= 0 else fixture_doc + ) + else: + tw.line(" no docstring available", red=True) + + def write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. 
+ info: Optional[FuncFixtureInfo] = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> Union[int, ExitCode]: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + curdir = Path.cwd() + tw = _pytest.config.create_terminal_writer(config) + verbose = config.getvalue("verbose") + + fm = session._fixturemanager + + available = [] + seen: Set[Tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, str(curdir)) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + _pretty_fixture_path(fixturedef.func), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, prettypath, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname.startswith("_"): + continue + tw.write(f"{argname}", green=True) + if fixturedef.scope != "function": + tw.write(" [%s scope]" % fixturedef.scope, cyan=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring(tw, doc.split("\n\n")[0] if verbose <= 0 else doc) + else: + tw.line(" no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) + + +class Function(PyobjMixin, nodes.Item): + """Item responsible for setting up and executing a Python test function. + + :param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + :param parent: + The parent Node. + :param config: + The pytest Config object. + :param callspec: + If given, this is function has been parametrized and the callspec contains + meta information about the parametrization. + :param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + :param keywords: + Keywords bound to the function object for "-k" matching. + :param session: + The pytest Session object. + :param fixtureinfo: + Fixture information already resolved at this fixture node.. + :param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. 
Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. + _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Optional[Config] = None, + callspec: Optional[CallSpec2] = None, + callobj=NOTSET, + keywords: Optional[Mapping[str, Any]] = None, + session: Optional[Session] = None, + fixtureinfo: Optional[FuncFixtureInfo] = None, + originalname: Optional[str] = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self.obj = callobj + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + self.own_markers.extend(callspec.marks) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + # Note: the order of the updates is important here; indicates what + # takes priority (ctor argument over function attributes over markers). + # Take own_markers only; NodeKeywords handles parent traversal on its own. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + self.keywords.update(self.obj.__dict__) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fixtureinfo = self.session._fixturemanager.getfixtureinfo( + self, self.obj, self.cls, funcargs=True + ) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent(cls, parent, **kw): # todo: determine sound type limitations + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: Dict[str, object] = {} + self._request = fixtures.FixtureRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + def _getobj(self): + assert self.parent is not None + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. 
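+            # (``newinstance`` builds a brand-new instance of the test class,
+            # so state stored on ``self`` by one test method cannot leak into
+            # the next method's run.)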
+ parent_obj = self.parent.newinstance() + else: + parent_obj = self.parent.obj # type: ignore[attr-defined] + return getattr(parent_obj, self.originalname) + + @property + def _pyfuncitem(self): + """(compatonly) for code expecting pytest-2.2 style request objects.""" + return self + + def runtest(self) -> None: + """Execute the underlying test function.""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self) -> None: + self._request._fillfixtures() + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): + code = _pytest._code.Code.from_function(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + ntraceback = ntraceback.filter(excinfo) + + # issue364: mark all but first and last frames to + # only show a single-line message for each frame. + if self.config.getoption("tbstyle", "auto") == "auto": + if len(ntraceback) > 2: + ntraceback = Traceback( + entry + if i == 0 or i == len(ntraceback) - 1 + else entry.with_repr_style("short") + for i, entry in enumerate(ntraceback) + ) + + return ntraceback + return excinfo.traceback + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> Union[str, TerminalRepr]: + style = self.config.getoption("tbstyle", "auto") + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class FunctionDefinition(Function): + """This class is a stop gap solution until we evolve to have actual function + definition nodes and manage to get rid of ``metafunc``.""" + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/venv/lib/python3.12/site-packages/_pytest/python_api.py b/venv/lib/python3.12/site-packages/_pytest/python_api.py new file mode 100644 index 0000000000..183356100c --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/python_api.py @@ -0,0 +1,996 @@ +import math +import pprint +from collections.abc import Collection +from collections.abc import Sized +from decimal import Decimal +from numbers import Complex +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import ContextManager +from typing import List +from typing import Mapping +from typing import Optional +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +if TYPE_CHECKING: + from numpy import ndarray + + +import _pytest._code +from _pytest.compat import final +from _pytest.compat import STRING_TYPES +from _pytest.compat import overload +from _pytest.outcomes import fail + + +def _non_numeric_type_error(value, at: Optional[str]) -> TypeError: + at_str = f" at {at}" if at else "" + return TypeError( + "cannot make approximate comparisons to non-numeric values: {!r} {}".format( + value, at_str + ) + ) + + +def _compare_approx( + full_object: object, + message_data: Sequence[Tuple[str, str, str]], + number_of_elements: 
int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> List[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = [0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def _repr_compare(self, other_side: Any) -> List[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + def __ne__(self, actual) -> bool: + return not (actual == self) + + def _approx_scalar(self, x) -> "ApproxScalar": + if isinstance(x, Decimal): + return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """Yield all the pairs of numbers to be compared. + + This is used to implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self) -> None: + """Raise a TypeError if the expected value is not a valid type.""" + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + + +def _recursive_sequence_map(f, x): + """Recursively map a function over a sequence of arbitrary depth""" + if isinstance(x, (list, tuple)): + seq_type = type(x) + return seq_type(_recursive_sequence_map(f, xi) for xi in x) + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """Perform approximate comparisons where the expected value is numpy array.""" + + def __repr__(self) -> str: + list_scalars = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + return f"approx({list_scalars!r})" + + def _repr_compare(self, other_side: "ndarray") -> List[str]: + import itertools + import math + + def get_value_from_nested_list( + nested_list: List[Any], nd_index: Tuple[Any, ...] 
+ ) -> Any: + """ + Helper function to get the value out of a nested list, given an n-dimensional index. + This mimics numpy's indexing, but for raw nested python lists. + """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_seq = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + + if np_array_shape != other_side.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_seq, index) + other_value = get_value_from_nested_list(other_side, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side, index)), + str(get_value_from_nested_list(approx_side_as_seq, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except Exception as e: + raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. 
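+        # A scalar ``actual`` is broadcast: it is compared against every
+        # element of the expected array. Otherwise the two sides are paired
+        # element-by-element over the expected array's indices.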
+ + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """Perform approximate comparisons where the expected value is a mapping + with numeric values (the keys can be anything).""" + + def __repr__(self) -> str: + return "approx({!r})".format( + {k: self._approx_scalar(v) for k, v in self.expected.items()} + ) + + def _repr_compare(self, other_side: Mapping[object, float]) -> List[str]: + import math + + approx_side_as_map = { + k: self._approx_scalar(v) for k, v in self.expected.items() + } + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for (approx_key, approx_value), other_value in zip( + approx_side_as_map.items(), other_side.values() + ): + if approx_value != other_value: + if approx_value.expected is not None and other_value is not None: + max_abs_diff = max( + max_abs_diff, abs(approx_value.expected - other_value) + ) + if approx_value.expected == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max( + max_rel_diff, + abs( + (approx_value.expected - other_value) + / approx_value.expected + ), + ) + different_ids.append(approx_key) + + message_data = [ + (str(key), str(other_side[key]), str(approx_side_as_map[key])) + for key in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if set(actual.keys()) != set(self.expected.keys()): + return False + except AttributeError: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self) -> None: + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + + +class ApproxSequenceLike(ApproxBase): + """Perform approximate comparisons where the expected value is a sequence of numbers.""" + + def __repr__(self) -> str: + seq_type = type(self.expected) + if seq_type not in (tuple, list): + seq_type = list + return "approx({!r})".format( + seq_type(self._approx_scalar(x) for x in self.expected) + ) + + def _repr_compare(self, other_side: Sequence[float]) -> List[str]: + import math + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare lists with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected) + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for i, (approx_value, other_value) in enumerate( + zip(approx_side_as_map, other_side) + ): + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(i) + + message_data = [ + (str(i), str(other_side[i]), str(approx_side_as_map[i])) + for i in different_ids + ] + + return _compare_approx( + 
self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if len(actual) != len(self.expected): + return False + except TypeError: + return False + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self) -> None: + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + + +class ApproxScalar(ApproxBase): + """Perform approximate comparisons where the expected value is a single number.""" + + # Using Real should be better than this Union, but not possible yet: + # https://github.com/python/typeshed/pull/3108 + DEFAULT_ABSOLUTE_TOLERANCE: Union[float, Decimal] = 1e-12 + DEFAULT_RELATIVE_TOLERANCE: Union[float, Decimal] = 1e-6 + + def __repr__(self) -> str: + """Return a string communicating both the expected value and the + tolerance for the comparison being made. + + For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``. + """ + # Don't show a tolerance for values that aren't compared using + # tolerances, i.e. non-numerics and infinities. Need to call abs to + # handle complex numbers, e.g. (inf + 1j). + if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf( + abs(self.expected) # type: ignore[arg-type] + ): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = f"{self.tolerance:.1e}" + if ( + isinstance(self.expected, Complex) + and self.expected.imag + and not math.isinf(self.tolerance) + ): + vetted_tolerance += " ∠ ±180°" + except ValueError: + vetted_tolerance = "???" + + return f"{self.expected} ± {vetted_tolerance}" + + def __eq__(self, actual) -> bool: + """Return whether the given value is equal to the expected value + within the pre-specified tolerance.""" + asarray = _as_numpy_array(actual) + if asarray is not None: + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in asarray.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # If either type is non-numeric, fall back to strict equality. + # NB: we need Complex, rather than just Number, to ensure that __abs__, + # __sub__, and __float__ are defined. + if not ( + isinstance(self.expected, (Complex, Decimal)) + and isinstance(actual, (Complex, Decimal)) + ): + return False + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): # type: ignore[arg-type] + return self.nan_ok and math.isnan(abs(actual)) # type: ignore[arg-type] + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. 
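+        # (For example, ``1e300 == approx(float("inf"))`` must be False even
+        # though the relative tolerance, ``1e-6 * inf``, would otherwise admit
+        # any finite number.)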
+ if math.isinf(abs(self.expected)): # type: ignore[arg-type] + return False + + # Return true if the two numbers are within the tolerance. + result: bool = abs(self.expected - actual) <= self.tolerance + return result + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {relative_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two ordered sequences of numbers) are equal to each other + within some tolerance. + + Due to the :doc:`python:tutorial/floatingpoint`, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. 
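+
+    For example, a relative comparison written by hand buries both the
+    reference value and the tolerance inside the arithmetic::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6 * abs(0.3)
+        True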
+ + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for ordered sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + Only ordered sequences are supported, because ``approx`` needs + to infer the relative position of the sequences without ambiguity. This means + ``sets`` and other unordered sequences are not supported. + + Finally, dictionary *values* can also be compared:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + The comparison will be true if both mappings have the same keys and their + respective values match the expected tolerances. + + **Tolerances** + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + You can also use ``approx`` to compare nonnumeric types, or dicts and + sequences containing nonnumeric types, in which case it falls back to + strict equality. This can be useful for comparing dicts and sequences that + can contain optional values:: + + >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None}) + True + >>> [None, 1.0000005] == approx([None,1]) + True + >>> ["foo", 1.0000005] == approx([None,1]) + False + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. 
All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met. Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. More information: :py:func:`math.isclose`.
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by :py:func:`numpy.allclose`. More information:
+      :std:doc:`numpy:reference/generated/numpy.isclose`.
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered, so this function is not appropriate for very large or very
+      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
+      and it's ugly because it doesn't follow PEP8. More information:
+      :py:meth:`unittest.TestCase.assertAlmostEqual`.
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. note::
+
+        ``approx`` can handle numpy arrays, but we recommend the
+        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
+        if you need support for comparisons, NaNs, or ULP-based tolerances.
+
+        To match strings using regex, you can use
+        `Matches `_
+        from the
+        `re_assert package `_.
+
+    .. warning::
+
+       .. versionchanged:: 3.2
+
+       In order to avoid inconsistent behavior, :py:exc:`TypeError` is
+       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+       The example below illustrates the problem::
+
+           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+       to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+       for the comparison. This is because the call hierarchy of rich comparisons
+       follows a fixed behavior. More information: :py:meth:`object.__ge__`
+
+    .. versionchanged:: 3.7.1
+       ``approx`` raises ``TypeError`` when it encounters a dict value or
+       sequence element of nonnumeric type.
+
+    .. versionchanged:: 6.1.0
+       ``approx`` falls back to strict equality for nonnumeric types instead
+       of raising ``TypeError``.
+    """
+
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # and ``__repr__()``. The former is used to actually check if some
+    # "actual" value is equivalent to the given expected value within the
+    # allowed tolerance. The latter is used to show the user the expected
+    # value and tolerance, in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+
+    __tracebackhide__ = True
+
+    if isinstance(expected, Decimal):
+        cls: Type[ApproxBase] = ApproxDecimal
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif _is_numpy_array(expected):
+        expected = _as_numpy_array(expected)
+        cls = ApproxNumpy
+    elif (
+        hasattr(expected, "__getitem__")
+        and isinstance(expected, Sized)
+        # Type ignored because the error is wrong -- not unreachable.
+        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
+    ):
+        cls = ApproxSequenceLike
+    elif (
+        isinstance(expected, Collection)
+        # Type ignored because the error is wrong -- not unreachable.
+        and not isinstance(expected, STRING_TYPES)  # type: ignore[unreachable]
+    ):
+        msg = f"pytest.approx() only supports ordered sequences, but got: {repr(expected)}"
+        raise TypeError(msg)
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_numpy_array(obj: object) -> bool:
+    """
+    Return true if the given object is implicitly convertible to ndarray,
+    and numpy is already imported.
+    """
+    return _as_numpy_array(obj) is not None
+
+
+def _as_numpy_array(obj: object) -> Optional["ndarray"]:
+    """
+    Return an ndarray if the given object is implicitly convertible to ndarray,
+    and numpy is already imported, otherwise None.
+    """
+    import sys
+
+    np: Any = sys.modules.get("numpy")
+    if np is not None:
+        # avoid infinite recursion on numpy scalars, which have __array__
+        if np.isscalar(obj):
+            return None
+        elif isinstance(obj, np.ndarray):
+            return obj
+        elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
+            return np.asarray(obj)
+    return None
+
+
+# builtin pytest.raises helper
+
+E = TypeVar("E", bound=BaseException)
+
+
+@overload
+def raises(
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]],
+    *,
+    match: Optional[Union[str, Pattern[str]]] = ...,
+) -> "RaisesContext[E]":
+    ...
+
+
+@overload
+def raises(  # noqa: F811
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]],
+    func: Callable[..., Any],
+    *args: Any,
+    **kwargs: Any,
+) -> _pytest._code.ExceptionInfo[E]:
+    ...
+
+
+def raises(  # noqa: F811
+    expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
+) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
+    r"""Assert that a code block/function call raises an exception.
+
+    :param typing.Type[E] | typing.Tuple[typing.Type[E], ...] expected_exception:
+        The expected exception type, or a tuple if one of multiple possible
+        exception types is expected.
+    :kwparam str | typing.Pattern[str] | None match:
+        If specified, a string containing a regular expression,
+        or a regular expression object, that is tested against the string
+        representation of the exception using :func:`re.search`.
+
+        To match a literal string that may contain :ref:`special characters
+        `, the pattern can first be escaped with :func:`re.escape`.
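+
+        For example (``pytest`` is imported here only to keep the snippet
+        self-contained)::
+
+            >>> import re
+            >>> import pytest
+            >>> with pytest.raises(ValueError, match=re.escape("invalid (42)")):
+            ...     raise ValueError("invalid (42)")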
+
+        (This is only used when :py:func:`pytest.raises` is used as a context manager,
+        and passed through to the function otherwise.
+        When using :py:func:`pytest.raises` as a function, you can use:
+        ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
+
+    .. currentmodule:: _pytest._code
+
+    Use ``pytest.raises`` as a context manager, which will capture the exception of the given
+    type::
+
+        >>> import pytest
+        >>> with pytest.raises(ZeroDivisionError):
+        ...    1/0
+
+    If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
+    above), or no exception at all, the check will fail instead.
+
+    You can also use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with pytest.raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with pytest.raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
+    The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
+    details of the captured exception::
+
+        >>> with pytest.raises(ValueError) as exc_info:
+        ...     raise ValueError("value must be 42")
+        >>> assert exc_info.type is ValueError
+        >>> assert exc_info.value.args[0] == "value must be 42"
+
+    .. note::
+
+       When using ``pytest.raises`` as a context manager, it's worthwhile to
+       note that normal context manager rules apply and that the exception
+       raised *must* be the final line in the scope of the context manager.
+       Lines of code after that, within the scope of the context manager, will
+       not be executed. For example::
+
+           >>> value = 15
+           >>> with pytest.raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...     assert exc_info.type is ValueError  # this will not execute
+
+       Instead, the following approach must be taken (note the difference in
+       scope)::
+
+           >>> with pytest.raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...
+           >>> assert exc_info.type is ValueError
+
+    **Using with** ``pytest.mark.parametrize``
+
+    When using :ref:`pytest.mark.parametrize ref`
+    it is possible to parametrize tests such that
+    some runs raise an exception and others do not.
+
+    See :ref:`parametrizing_conditional_raising` for an example.
+
+    **Legacy form**
+
+    It is possible to specify a callable by passing a to-be-called lambda::
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ...>
+
+    or you can specify an arbitrary callable with arguments::
+
+        >>> def f(x): return 1/x
+        ...
+        >>> raises(ZeroDivisionError, f, 0)
+        <ExceptionInfo ...>
+        >>> raises(ZeroDivisionError, f, x=0)
+        <ExceptionInfo ...>
+
+    The form above is fully supported but discouraged for new code because the
+    context manager form is regarded as more readable and less error-prone.
+
+    .. note::
+        Similar to caught exception objects in Python, explicitly clearing
+        local references to returned ``ExceptionInfo`` objects can
+        help the Python interpreter speed up its garbage collection.
+
+        Clearing those references breaks a reference cycle
+        (``ExceptionInfo`` --> caught exception --> frame stack raising
+        the exception --> current frame stack --> local variables -->
+        ``ExceptionInfo``) which makes Python keep all objects referenced
+        from that cycle (including all local variables in the current
+        frame) alive until the next cyclic garbage collection run.
+        More detailed information can be found in the official Python
+        documentation for :ref:`the try statement `.
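+
+        A sketch of applying that advice, reusing ``exc_info`` from the
+        examples above::
+
+            >>> with pytest.raises(ValueError) as exc_info:
+            ...     raise ValueError("value must be 42")
+            >>> assert exc_info.type is ValueError
+            >>> del exc_info  # break the cycle as soon as it is not needed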
+ """ + __tracebackhide__ = True + + if not expected_exception: + raise ValueError( + f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. " + f"Raising exceptions is already understood as failing the test, so you don't need " + f"any special code to say 'this should never raise an exception'." + ) + if isinstance(expected_exception, type): + expected_exceptions: Tuple[Type[E], ...] = (expected_exception,) + else: + expected_exceptions = expected_exception + for exc in expected_exceptions: + if not isinstance(exc, type) or not issubclass(exc, BaseException): + msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable] + not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__ + raise TypeError(msg.format(not_a)) + + message = f"DID NOT RAISE {expected_exception}" + + if not args: + match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None) + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + return RaisesContext(expected_exception, message, match) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + try: + func(*args[1:], **kwargs) + except expected_exception as e: + return _pytest._code.ExceptionInfo.from_exception(e) + fail(message) + + +# This doesn't work with mypy for now. Use fail.Exception instead. +raises.Exception = fail.Exception # type: ignore + + +@final +class RaisesContext(ContextManager[_pytest._code.ExceptionInfo[E]]): + def __init__( + self, + expected_exception: Union[Type[E], Tuple[Type[E], ...]], + message: str, + match_expr: Optional[Union[str, Pattern[str]]] = None, + ) -> None: + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo: Optional[_pytest._code.ExceptionInfo[E]] = None + + def __enter__(self) -> _pytest._code.ExceptionInfo[E]: + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(self.message) + assert self.excinfo is not None + if not issubclass(exc_type, self.expected_exception): + return False + # Cast to narrow the exception type now that it's verified. 
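+        # (the ``issubclass`` check above guarantees ``exc_val`` is an
+        # instance of one of the expected types, so the cast is sound)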
+ exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb)) + self.excinfo.fill_unfilled(exc_info) + if self.match_expr is not None: + self.excinfo.match(self.match_expr) + return True diff --git a/venv/lib/python3.12/site-packages/_pytest/python_path.py b/venv/lib/python3.12/site-packages/_pytest/python_path.py new file mode 100644 index 0000000000..cceabbca12 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/python_path.py @@ -0,0 +1,24 @@ +import sys + +import pytest +from pytest import Config +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[]) + + +@pytest.hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(early_config.getini("pythonpath")): + sys.path.insert(0, str(path)) + + +@pytest.hookimpl(trylast=True) +def pytest_unconfigure(config: Config) -> None: + for path in config.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) diff --git a/venv/lib/python3.12/site-packages/_pytest/recwarn.py b/venv/lib/python3.12/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000000..d76ea020f1 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/recwarn.py @@ -0,0 +1,313 @@ +"""Record warnings during test function execution.""" +import re +import warnings +from pprint import pformat +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterator +from typing import List +from typing import Optional +from typing import Pattern +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +from _pytest.compat import final +from _pytest.compat import overload +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import WARNS_NONE_ARG +from _pytest.fixtures import fixture +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator["WarningsRecorder", None, None]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. + + See https://docs.pytest.org/en/latest/how-to/capture-warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call( + *, match: Optional[Union[str, Pattern[str]]] = ... +) -> "WarningsRecorder": + ... + + +@overload +def deprecated_call( # noqa: F811 + func: Callable[..., T], *args: Any, **kwargs: Any +) -> T: + ... + + +def deprecated_call( # noqa: F811 + func: Optional[Callable[..., Any]] = None, *args: Any, **kwargs: Any +) -> Union["WarningsRecorder", Any]: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. 
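+
+    For example, the function form of the call above::
+
+        >>> assert pytest.deprecated_call(api_call_v2) == 200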
+ + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func,) + args + return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) + + +@overload +def warns( + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = ..., + *, + match: Optional[Union[str, Pattern[str]]] = ..., +) -> "WarningsChecker": + ... + + +@overload +def warns( # noqa: F811 + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: + ... + + +def warns( # noqa: F811 + expected_warning: Union[Type[Warning], Tuple[Type[Warning], ...]] = Warning, + *args: Any, + match: Optional[Union[str, Pattern[str]]] = None, + **kwargs: Any, +) -> Union["WarningsChecker", Any]: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or sequence + of warning classes, and the code inside the ``with`` block must issue at least one + warning of that class or classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, one for + each warning raised (regardless of whether it is an ``expected_warning`` or not). + + This function can be used as a context manager, which will capture all the raised + warnings inside it:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests + such that some runs raise a warning and others do not. + + This could be achieved in the same way as with exceptions, see + :ref:`parametrizing_conditional_raising` for an example. + + """ + __tracebackhide__ = True + if not args: + if kwargs: + argnames = ", ".join(sorted(kwargs)) + raise TypeError( + f"Unexpected keyword arguments passed to pytest.warns: {argnames}" + "\nUse context-manager form instead?" + ) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] + """A context manager to record raised warnings. + + Each recorded warning is an instance of :class:`warnings.WarningMessage`. + + Adapted from `warnings.catch_warnings`. + + .. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. 
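+
+    A minimal sketch of typical use, via the ``recwarn`` fixture (which
+    constructs the recorder; the ``test_`` function name is illustrative)::
+
+        import warnings
+
+        def test_deprecation(recwarn):
+            warnings.warn("use the new API", DeprecationWarning)
+            assert len(recwarn) == 1
+            assert issubclass(recwarn.pop(DeprecationWarning).category,
+                              DeprecationWarning)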
+
+    """
+
+    def __init__(self, *, _ispytest: bool = False) -> None:
+        check_ispytest(_ispytest)
+        # Type ignored due to the way typeshed handles warnings.catch_warnings.
+        super().__init__(record=True)  # type: ignore[call-arg]
+        self._entered = False
+        self._list: List[warnings.WarningMessage] = []
+
+    @property
+    def list(self) -> List["warnings.WarningMessage"]:
+        """The list of recorded warnings."""
+        return self._list
+
+    def __getitem__(self, i: int) -> "warnings.WarningMessage":
+        """Get a recorded warning by index."""
+        return self._list[i]
+
+    def __iter__(self) -> Iterator["warnings.WarningMessage"]:
+        """Iterate through the recorded warnings."""
+        return iter(self._list)
+
+    def __len__(self) -> int:
+        """The number of recorded warnings."""
+        return len(self._list)
+
+    def pop(self, cls: Type[Warning] = Warning) -> "warnings.WarningMessage":
+        """Pop the first recorded warning whose category is ``cls`` or a
+        subclass of it, raising ``AssertionError`` if no such warning was
+        recorded."""
+        for i, w in enumerate(self._list):
+            if issubclass(w.category, cls):
+                return self._list.pop(i)
+        __tracebackhide__ = True
+        raise AssertionError(f"{cls!r} not found in warning list")
+
+    def clear(self) -> None:
+        """Clear the list of recorded warnings."""
+        self._list[:] = []
+
+    # Type ignored because it doesn't exactly match warnings.catch_warnings.__enter__
+    # -- it returns a List but we only emulate one.
+    def __enter__(self) -> "WarningsRecorder":  # type: ignore
+        if self._entered:
+            __tracebackhide__ = True
+            raise RuntimeError(f"Cannot enter {self!r} twice")
+        _list = super().__enter__()
+        # record=True means it's None.
+        assert _list is not None
+        self._list = _list
+        warnings.simplefilter("always")
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        if not self._entered:
+            __tracebackhide__ = True
+            raise RuntimeError(f"Cannot exit {self!r} without entering first")
+
+        super().__exit__(exc_type, exc_val, exc_tb)
+
+        # Built-in catch_warnings does not reset entered state so we do it
+        # manually here for this context manager to become reusable.
+ self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: Optional[ + Union[Type[Warning], Tuple[Type[Warning], ...]] + ] = Warning, + match_expr: Optional[Union[str, Pattern[str]]] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if expected_warning is None: + warnings.warn(WARNS_NONE_ARG, stacklevel=4) + expected_warning_tup = None + elif isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif issubclass(expected_warning, Warning): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + def found_str(): + return pformat([record.message for record in self], indent=2) + + # only check if we're not currently handling an exception + if exc_type is None and exc_val is None and exc_tb is None: + if self.expected_warning is not None: + if not any(issubclass(r.category, self.expected_warning) for r in self): + __tracebackhide__ = True + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f"The list of emitted warnings is: {found_str()}." + ) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail( + f"""\ +DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted. 
+ Regex: {self.match_expr} + Emitted warnings: {found_str()}""" + ) diff --git a/venv/lib/python3.12/site-packages/_pytest/reports.py b/venv/lib/python3.12/site-packages/_pytest/reports.py new file mode 100644 index 0000000000..74e8794b23 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/reports.py @@ -0,0 +1,622 @@ +import dataclasses +import os +from io import StringIO +from pprint import pprint +from typing import Any +from typing import cast +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Mapping +from typing import NoReturn +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import final +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import skip + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "%s.%s.%s" % d["version_info"][:3] + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +_R = TypeVar("_R", bound="BaseReport") + + +class BaseReport: + when: Optional[str] + location: Optional[Tuple[str, Optional[int], str]] + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ] + sections: List[Tuple[str, str]] + nodeid: str + outcome: "Literal['passed', 'failed', 'skipped']" + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: + ... + + def toterminal(self, out: TerminalWriter) -> None: + if hasattr(self, "node"): + worker_info = getworkerinfoline(self.node) + if worker_info: + out.line(worker_info) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr_terminal = cast(TerminalRepr, longrepr) + longrepr_terminal.toterminal(out) + else: + try: + s = str(longrepr) + except UnicodeEncodeError: + s = "" + out.line(s) + + def get_sections(self, prefix: str) -> Iterator[Tuple[str, str]]: + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self) -> str: + """Read-only property that returns the full string representation of + ``longrepr``. + + .. versionadded:: 3.0 + """ + file = StringIO() + tw = TerminalWriter(file) + tw.hasmarkup = False + self.toterminal(tw) + exc = file.getvalue() + return exc.strip() + + @property + def caplog(self) -> str: + """Return captured log lines, if log capturing is enabled. + + .. 
versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" + + @property + def fspath(self) -> str: + """The path portion of the reported node, as a string.""" + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> Optional[str]: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + return None + + def _get_verbose_word(self, config: Config): + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + return verbose + + def _to_json(self) -> Dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls: Type[_R], reportdict: Dict[str, object]) -> _R: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: Type[BaseReport], reportdict +) -> NoReturn: + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream) + pprint("report_name: %s" % report_class, stream=stream) + pprint(reportdict, stream=stream) + pprint("Please report this bug at %s" % url, stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. 
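+
+    A report can be round-tripped through builtin types with the experimental
+    ``_to_json()`` / ``_from_json()`` helpers, e.g.
+    ``TestReport._from_json(report._to_json())``.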
+ """ + + __test__ = False + + def __init__( + self, + nodeid: str, + location: Tuple[str, Optional[int], str], + keywords: Mapping[str, Any], + outcome: "Literal['passed', 'failed', 'skipped']", + longrepr: Union[ + None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr + ], + when: "Literal['setup', 'call', 'teardown']", + sections: Iterable[Tuple[str, str]] = (), + duration: float = 0, + start: float = 0, + stop: float = 0, + user_properties: Optional[Iterable[Tuple[str, object]]] = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + #: The filesystempath may be relative to ``config.rootdir``. + #: The line number is 0-based. + self.location: Tuple[str, Optional[int], str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords: Mapping[str, Any] = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration: float = duration + + #: The system time when the call started, in seconds since the epoch. + self.start: float = start + #: The system time when the call ended, in seconds since the epoch. + self.stop: float = stop + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return "<{} {!r} when={!r} outcome={!r}>".format( + self.__class__.__name__, self.nodeid, self.when, self.outcome + ) + + @classmethod + def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport": + """Create and fill a TestReport with standard item and call info. + + :param item: The item. + :param call: The call info. + """ + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. + assert when != "collect" + duration = call.duration + start = call.start + stop = call.stop + keywords = {x: 1 for x in item.keywords} + excinfo = call.excinfo + sections = [] + if not call.excinfo: + outcome: Literal["passed", "failed", "skipped"] = "passed" + longrepr: Union[ + None, + ExceptionInfo[BaseException], + Tuple[str, int, str], + str, + TerminalRepr, + ] = None + else: + if not isinstance(excinfo, ExceptionInfo): + outcome = "failed" + longrepr = excinfo + elif isinstance(excinfo.value, skip.Exception): + outcome = "skipped" + r = excinfo._getreprcrash() + assert ( + r is not None + ), "There should always be a traceback entry for skipping a test." 
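+                # ``_use_item_location`` is set when the skip was raised on the
+                # item's behalf (e.g. by a ``skip`` marker), in which case the
+                # report should point at the test's own location rather than
+                # the line where the exception was raised.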
+                if excinfo.value._use_item_location:
+                    path, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = os.fspath(path), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.getoption("tbstyle", "auto")
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append((f"Captured {key} {rwhen}", content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            start,
+            stop,
+            user_properties=item.user_properties,
+        )
+
+
+@final
+class CollectReport(BaseReport):
+    """Collection report object.
+
+    Reports can contain arbitrary extra attributes.
+    """
+
+    when = "collect"
+
+    def __init__(
+        self,
+        nodeid: str,
+        outcome: "Literal['passed', 'failed', 'skipped']",
+        longrepr: Union[
+            None, ExceptionInfo[BaseException], Tuple[str, int, str], str, TerminalRepr
+        ],
+        result: Optional[List[Union[Item, Collector]]],
+        sections: Iterable[Tuple[str, str]] = (),
+        **extra,
+    ) -> None:
+        #: Normalized collection nodeid.
+        self.nodeid = nodeid
+
+        #: Test outcome, always one of "passed", "failed", "skipped".
+        self.outcome = outcome
+
+        #: None or a failure representation.
+        self.longrepr = longrepr
+
+        #: The collected items and collection nodes.
+        self.result = result or []
+
+        #: Tuples of str ``(heading, content)`` with extra information
+        #: for the test report. Used by pytest to add text captured
+        #: from ``stdout``, ``stderr``, and intercepted logging events. May
+        #: be used by other plugins to add arbitrary information to reports.
+        self.sections = list(sections)
+
+        self.__dict__.update(extra)
+
+    @property
+    def location(  # type:ignore[override]
+        self,
+    ) -> Optional[Tuple[str, Optional[int], str]]:
+        return (self.fspath, None, self.fspath)
+
+    def __repr__(self) -> str:
+        return "<CollectReport {!r} lenresult={} outcome={!r}>".format(
+            self.nodeid, len(self.result), self.outcome
+        )
+
+
+class CollectErrorRepr(TerminalRepr):
+    def __init__(self, msg: str) -> None:
+        self.longrepr = msg
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(
+    report: Union[CollectReport, TestReport]
+) -> Optional[Dict[str, Any]]:
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["$report_type"] = report.__class__.__name__
+        return data
+    # TODO: Check if this is actually reachable.
+    return None  # type: ignore[unreachable]
+
+
+def pytest_report_from_serializable(
+    data: Dict[str, Any],
+) -> Optional[Union[CollectReport, TestReport]]:
+    if "$report_type" in data:
+        if data["$report_type"] == "TestReport":
+            return TestReport._from_json(data)
+        elif data["$report_type"] == "CollectReport":
+            return CollectReport._from_json(data)
+        assert False, "Unknown report_type unserialize data: {}".format(
+            data["$report_type"]
+        )
+    return None
+
+
+def _report_to_json(report: BaseReport) -> Dict[str, Any]:
+    """Return the contents of this report as a dict of builtin entries,
+    suitable for serialization.
+
+    This was originally the serialize_report() function from xdist (ca03269).
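A round-trip sketch of the two serialization hooks above (illustrative; this mirrors how pytest-xdist ships reports between processes, and the underscore helpers they call are experimental, as their docstrings note):

    data = pytest_report_to_serializable(report)     # JSON-friendly dict
    assert data["$report_type"] in ("TestReport", "CollectReport")
    rebuilt = pytest_report_from_serializable(data)  # reconstructs the same class
    assert rebuilt.nodeid == report.nodeid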
+ """ + + def serialize_repr_entry( + entry: Union[ReprEntry, ReprEntryNative] + ) -> Dict[str, Any]: + data = dataclasses.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = dataclasses.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> Dict[str, Any]: + result = dataclasses.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: Optional[ReprFileLocation], + ) -> Optional[Dict[str, Any]]: + if reprcrash is not None: + return dataclasses.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> Dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: Dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: Union[ReprEntry, ReprEntryNative] = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: Optional[Dict[str, Any]]): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: Union[ + ExceptionChainRepr, ReprExceptionInfo + ] = ExceptionChainRepr(chain) + else: + exception_info = ReprExceptionInfo( + reprtraceback=reprtraceback, + reprcrash=reprcrash, + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/venv/lib/python3.12/site-packages/_pytest/runner.py b/venv/lib/python3.12/site-packages/_pytest/runner.py new file mode 100644 index 0000000000..f861c05a45 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/runner.py @@ -0,0 +1,551 @@ +"""Basic collect and runtest protocol implementations.""" +import bdb +import dataclasses +import os +import sys +from typing import Callable +from typing import cast +from typing import Dict +from typing import Generic +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.compat import final +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + +if 
sys.version_info[:2] < (3, 11): + from exceptiongroup import BaseExceptionGroup + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="Show N slowest setup/test durations (N=0 for all)", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=0.005, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. " + "Default: 0.005.", + ) + + +def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.getvalue("verbose") + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return] + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", "slowest %s durations" % durations) + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if verbose < 2 and rep.duration < durations_min: + tr.write_line("") + tr.write_line( + "(%s durations < %gs hidden. Use -vv to show these durations.)" + % (len(dlist) - i, durations_min) + ) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: "Session") -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: "Session") -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Optional[Item] = None +) -> List[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
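    # (Context for the durations options defined earlier in this file: a
    # hypothetical invocation such as
    #     pytest --durations=10 --durations-min=0.25
    # prints the ten slowest setup/call/teardown phases taking >= 0.25s,
    # via pytest_terminal_summary above.)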
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise e + + +def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Optional["Literal['setup', 'call', 'teardown']"] +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds +) -> TestReport: + call = call_runtest_hook(item, when, **kwds) + hook = item.ihook + report: TestReport = hook.pytest_runtest_makereport(item=item, call=call) + if log: + hook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + hook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. + return False + if hasattr(report, "wasxfail"): + # Exception was expected. + return False + if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)): + # Special control flow exception. 
+ return False + return True + + +def call_runtest_hook( + item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds +) -> "CallInfo[None]": + if when == "setup": + ihook: Callable[..., None] = item.ihook.pytest_runtest_setup + elif when == "call": + ihook = item.ihook.pytest_runtest_call + elif when == "teardown": + ihook = item.ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: Tuple[Type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + return CallInfo.from_call( + lambda: ihook(item=item, **kwds), when=when, reraise=reraise + ) + + +TResult = TypeVar("TResult", covariant=True) + + +@final +@dataclasses.dataclass +class CallInfo(Generic[TResult]): + """Result/Exception info of a function invocation.""" + + _result: Optional[TResult] + #: The captured exception of the call, if it raised. + excinfo: Optional[ExceptionInfo[BaseException]] + #: The system time when the call started, in seconds since the epoch. + start: float + #: The system time when the call ended, in seconds since the epoch. + stop: float + #: The call duration, in seconds. + duration: float + #: The context of invocation: "collect", "setup", "call" or "teardown". + when: "Literal['collect', 'setup', 'call', 'teardown']" + + def __init__( + self, + result: Optional[TResult], + excinfo: Optional[ExceptionInfo[BaseException]], + start: float, + stop: float, + duration: float, + when: "Literal['collect', 'setup', 'call', 'teardown']", + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._result = result + self.excinfo = excinfo + self.start = start + self.stop = stop + self.duration = duration + self.when = when + + @property + def result(self) -> TResult: + """The return value of the call, if it didn't raise. + + Can only be accessed if excinfo is None. + """ + if self.excinfo is not None: + raise AttributeError(f"{self!r} has no valid result") + # The cast is safe because an exception wasn't raised, hence + # _result has the expected function return type (which may be + # None, that's why a cast and not an assert). + return cast(TResult, self._result) + + @classmethod + def from_call( + cls, + func: "Callable[[], TResult]", + when: "Literal['collect', 'setup', 'call', 'teardown']", + reraise: Optional[ + Union[Type[BaseException], Tuple[Type[BaseException], ...]] + ] = None, + ) -> "CallInfo[TResult]": + """Call func, wrapping the result in a CallInfo. + + :param func: + The function to call. Called without arguments. + :param when: + The phase in which the function is called. + :param reraise: + Exception or exceptions that shall propagate if raised by the + function, instead of being wrapped in the CallInfo. 
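        For illustration (a hedged example, not in the vendored file):

            ok = CallInfo.from_call(lambda: 1 + 1, when="call")
            assert ok.excinfo is None and ok.result == 2
            bad = CallInfo.from_call(lambda: 1 / 0, when="call")
            assert bad.excinfo is not None  # bad.result would raise AttributeError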
+        """
+        excinfo = None
+        start = timing.time()
+        precise_start = timing.perf_counter()
+        try:
+            result: Optional[TResult] = func()
+        except BaseException:
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and isinstance(excinfo.value, reraise):
+                raise
+            result = None
+        # use the perf counter
+        precise_stop = timing.perf_counter()
+        duration = precise_stop - precise_start
+        stop = timing.time()
+        return cls(
+            start=start,
+            stop=stop,
+            duration=duration,
+            when=when,
+            result=result,
+            excinfo=excinfo,
+            _ispytest=True,
+        )
+
+    def __repr__(self) -> str:
+        if self.excinfo is None:
+            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
+
+
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
+    call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
+    longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
+    if not call.excinfo:
+        outcome: Literal["passed", "skipped", "failed"] = "passed"
+    else:
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            # Type ignored because unittest is loaded dynamically.
+            skip_exceptions.append(unittest.SkipTest)  # type: ignore
+        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
+            outcome = "skipped"
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionChainRepr), repr(r_)
+            r = r_.reprcrash
+            assert r
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                assert isinstance(errorinfo, str)
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    result = call.result if not call.excinfo else None
+    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
+    rep.call = call  # type: ignore # see collect_one_node
+    return rep
+
+
+class SetupState:
+    """Shared state for setting up/tearing down test items or collectors
+    in a session.
+
+    Suppose we have a collection tree as follows:
+
+    <Session session>
+        <Module mod1>
+            <Function item1>
+        <Module mod2>
+            <Function item2>
+
+    The SetupState maintains a stack. The stack starts out empty:
+
+        []
+
+    During the setup phase of item1, setup(item1) is called. What it does
+    is:
+
+        push session to stack, run session.setup()
+        push mod1 to stack, run mod1.setup()
+        push item1 to stack, run item1.setup()
+
+    The stack is:
+
+        [session, mod1, item1]
+
+    While the stack is in this shape, it is allowed to add finalizers to
+    each of session, mod1, item1 using addfinalizer().
+
+    During the teardown phase of item1, teardown_exact(item2) is called,
+    where item2 is the next item to item1. What it does is:
+
+        pop item1 from stack, run its teardowns
+        pop mod1 from stack, run its teardowns
+
+    mod1 was popped because it ended its purpose with item1. The stack is:
+
+        [session]
+
+    During the setup phase of item2, setup(item2) is called. What it does
+    is:
+
+        push mod2 to stack, run mod2.setup()
+        push item2 to stack, run item2.setup()
+
+    Stack:
+
+        [session, mod2, item2]
+
+    During the teardown phase of item2, teardown_exact(None) is called,
+    because item2 is the last item. What it does is:
+
+        pop item2 from stack, run its teardowns
+        pop mod2 from stack, run its teardowns
+        pop session from stack, run its teardowns
+
+    Stack:
+
+        []
+
+    The end!
+    """
+
+    def __init__(self) -> None:
+        # The stack is in the dict insertion order.
+        self.stack: Dict[
+            Node,
+            Tuple[
+                # Node's finalizers.
+ List[Callable[[], object]], + # Node's exception, if its setup raised. + Optional[Union[OutcomeException, Exception]], + ], + ] = {} + + def setup(self, item: Item) -> None: + """Setup objects along the collector chain to the item.""" + needed_collectors = item.listchain() + + # If a collector fails its setup, fail its entire subtree of items. + # The setup is not retried for each item - the same exception is used. + for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], exc) + raise exc + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Optional[Item]) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = nextitem and nextitem.listchain() or [] + exceptions: List[BaseException] = [] + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + these_exceptions = [] + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + these_exceptions.append(e) + + if len(these_exceptions) == 1: + exceptions.extend(these_exceptions) + elif these_exceptions: + msg = f"errors while tearing down {node!r}" + exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1])) + + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup("errors during test teardown", exceptions[::-1]) + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/venv/lib/python3.12/site-packages/_pytest/scope.py b/venv/lib/python3.12/site-packages/_pytest/scope.py new file mode 100644 index 0000000000..7a746fb9fa --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" +from enum import Enum +from functools import total_ordering +from typing import Optional +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing_extensions import Literal + + _ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. 
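    For example (behavior of the helpers defined further down; an
    illustrative sketch):

        assert Scope.Function < Scope.Class < Scope.Session
        assert Scope.Class.next_higher() is Scope.Module
        assert Scope.from_user("module", "scope argument") is Scope.Module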
+ + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. + Function: "_ScopeName" = "function" + Class: "_ScopeName" = "class" + Module: "_ScopeName" = "module" + Package: "_ScopeName" = "package" + Session: "_ScopeName" = "session" + + def next_lower(self) -> "Scope": + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> "Scope": + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: "Scope") -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: "_ScopeName", descr: str, where: Optional[str] = None + ) -> "Scope": + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/venv/lib/python3.12/site-packages/_pytest/setuponly.py b/venv/lib/python3.12/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000000..583590d6b7 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/setuponly.py @@ -0,0 +1,97 @@ +from typing import Generator +from typing import Optional +from typing import Union + +import pytest +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="Only setup fixtures, do not execute tests", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="Show setup of fixtures while executing tests", + ) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, None, None]: + yield + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, "SETUP") + + +def pytest_fixture_post_finalizer(fixturedef: FixtureDef[object]) -> None: + if fixturedef.cached_result is not None: + config = fixturedef._fixturemanager.config + if config.option.setupshow: + _show_fixture_action(fixturedef, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param # type: ignore[attr-defined] + + +def _show_fixture_action(fixturedef: FixtureDef[object], msg: str) -> None: + config = fixturedef._fixturemanager.config + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + tw.write( + "{step} {scope} {fixture}".format( + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") # type: ignore[attr-defined] + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/venv/lib/python3.12/site-packages/_pytest/setupplan.py b/venv/lib/python3.12/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000000..1a4ebdd99c --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/setupplan.py @@ -0,0 +1,40 @@ +from typing import Optional +from typing import Union + +import pytest +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="Show what fixtures and tests would be executed but " + "don't execute anything", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Optional[object]: + # Will return a dummy fixture if the setuponly option is provided. 
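    # (Context, illustrative: returning a dummy cached_result here means the
    # real fixture function never runs; together with pytest_cmdline_main
    # below, `pytest --setup-plan` lists fixtures and tests without executing
    # anything, implying --setup-only and --setup-show.)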
+ if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/venv/lib/python3.12/site-packages/_pytest/skipping.py b/venv/lib/python3.12/site-packages/_pytest/skipping.py new file mode 100644 index 0000000000..26ce73758a --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/skipping.py @@ -0,0 +1,297 @@ +"""Support for skip/xfail functions and markers.""" +import dataclasses +import os +import platform +import sys +import traceback +from collections.abc import Mapping +from typing import Generator +from typing import Optional +from typing import Tuple +from typing import Type + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.reports import BaseReport +from _pytest.runner import CallInfo +from _pytest.stash import StashKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="Report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "xfail_strict", + "Default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. 
If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + "pytest_markeval_namespace() needs to return a dict, got {!r}".format( + dictionary + ) + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) # type: ignore[attr-defined] + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + "Error evaluating %r condition as a boolean" % mark.name, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + "Error evaluating %r: " % mark.name + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@dataclasses.dataclass(frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason: str = "unconditional skip" + + +def evaluate_skip_marks(item: Item) -> Optional[Skip]: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None + + return None + + +@dataclasses.dataclass(frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + __slots__ = ("reason", "run", "strict", "raises") + + reason: str + run: bool + strict: bool + raises: Optional[Tuple[Type[BaseException], ...]] + + +def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. 
+ if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StashKey[Optional[Xfail]]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + if skipped: + raise skip.Exception(skipped.reason, _use_item_location=True) + + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None, None, None]: + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + yield + + # The test run may have added an xfail mark dynamically. + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]): + outcome = yield + rep = outcome.get_result() + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + + +def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/venv/lib/python3.12/site-packages/_pytest/stash.py b/venv/lib/python3.12/site-packages/_pytest/stash.py new file mode 100644 index 0000000000..e61d75b95f --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/stash.py @@ -0,0 +1,112 @@ +from typing import Any +from typing import cast +from typing import Dict +from typing import Generic +from typing import TypeVar +from typing import Union + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. 
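The skipping plugin above is itself a working example of the Stash pattern introduced here (xfailed_key = StashKey[Optional[Xfail]]()); condensed into a hypothetical plugin sketch:

    from _pytest.stash import StashKey

    seen_key = StashKey[int]()  # hypothetical module-level, plugin-owned key

    def pytest_runtest_setup(item):
        # Typed access: the key fixes the value type as int.
        item.stash[seen_key] = item.stash.get(seen_key, 0) + 1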
+ + Usually you will be given an object which has a ``Stash``, for example + :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`: + + .. code-block:: python + + stash: Stash = some_object.stash + + If a module or plugin wants to store data in this ``Stash``, it creates + :class:`StashKey`\s for its keys (at the module level): + + .. code-block:: python + + # At the top-level of the module + some_str_key = StashKey[str]() + some_bool_key = StashKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + stash[some_str_key] = "value" + stash[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = stash[some_str_key] + # The static type of some_bool is bool. + some_bool = stash[some_bool_key] + """ + + __slots__ = ("_storage",) + + def __init__(self) -> None: + self._storage: Dict[StashKey[Any], object] = {} + + def __setitem__(self, key: StashKey[T], value: T) -> None: + """Set a value for key.""" + self._storage[key] = value + + def __getitem__(self, key: StashKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._storage[key]) + + def get(self, key: StashKey[T], default: D) -> Union[T, D]: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StashKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StashKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/venv/lib/python3.12/site-packages/_pytest/stepwise.py b/venv/lib/python3.12/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000000..74ad9dbd4d --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/stepwise.py @@ -0,0 +1,130 @@ +from typing import List +from typing import Optional +from typing import TYPE_CHECKING + +import pytest +from _pytest import nodes +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + +if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="Exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="Ignore the first failing test but stop on the next failing test. " + "Implicitly enables --stepwise.", + ) + + +@pytest.hookimpl +def pytest_configure(config: Config) -> None: + if config.option.stepwise_skip: + # allow --stepwise-skip to work on it's own merits. 
+ config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + if hasattr(session.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + # Clear the list of failing tests if the plugin is not active. + session.config.cache.set(STEPWISE_CACHE_DIR, []) + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Optional[Session] = None + self.report_status = "" + assert config.cache is not None + self.cache: Cache = config.cache + self.lastfailed: Optional[str] = self.cache.get(STEPWISE_CACHE_DIR, None) + self.skip: bool = config.getoption("stepwise_skip") + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: List[nodes.Item] + ) -> None: + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + # check all item nodes until we find a match on last failed + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.lastfailed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status = "previously failed test not found, not skipping." + else: + self.report_status = f"skipping {failed_index} already passed items." + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self) -> Optional[str]: + if self.config.getoption("verbose") >= 0 and self.report_status: + return f"stepwise: {self.report_status}" + return None + + def pytest_sessionfinish(self) -> None: + if hasattr(self.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed) diff --git a/venv/lib/python3.12/site-packages/_pytest/terminal.py b/venv/lib/python3.12/site-packages/_pytest/terminal.py new file mode 100644 index 0000000000..b0cdb58ce0 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/terminal.py @@ -0,0 +1,1481 @@ +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. 
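Taken together, the stepwise plugin above supports invocations like the following (behavior as implemented in StepwisePlugin; note that --sw-skip implies --stepwise per pytest_configure):

    pytest --stepwise        # stop at the first failure; the next run resumes there
    pytest --sw --sw-skip    # tolerate the first recorded failure, stop at the next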
+""" +import argparse +import dataclasses +import datetime +import inspect +import platform +import sys +import textwrap +import warnings +from collections import Counter +from functools import partial +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import ClassVar +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import NamedTuple +from typing import Optional +from typing import Sequence +from typing import Set +from typing import TextIO +from typing import Tuple +from typing import TYPE_CHECKING +from typing import Union + +import pluggy + +import _pytest._version +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io import TerminalWriter +from _pytest._io.wcwidth import wcswidth +from _pytest.assertion.util import running_on_ci +from _pytest.compat import final +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + +if TYPE_CHECKING: + from typing_extensions import Literal + + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: Optional[str] = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: Union[str, Sequence[object], None], + option_string: Optional[str] = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +class TestShortLogReport(NamedTuple): + """Used to store the test status result category, shortletter and verbose word. + For example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :ivar category: + The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. + + :ivar letter: + The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. + + :ivar word: + Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, + ``"ERROR"``, or the empty string. 
+ """ + + category: str + letter: str + word: Union[str, Tuple[str, Mapping[str, bool]]] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="Increase verbosity", + ) + group._addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="Disable header", + ) + group._addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="Disable summary", + ) + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="Decrease verbosity", + ) + group._addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="Set verbosity. Default: 0.", + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="Show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="Disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="Show locals in tracebacks (disabled by default)", + ) + group._addoption( + "--no-showlocals", + action="store_false", + dest="showlocals", + help="Hide locals in tracebacks (negate --showlocals passed through addopts)", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="Traceback print mode (auto/long/short/line/native/no)", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default: all.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="Don't cut any tracebacks (default is to cut)", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="Color terminal output (yes/no/auto)", + ) + group._addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled). 
" + "Default: yes.", + ) + + parser.addini( + "console_output_style", + help='Console output: "classic", or with additional progress information ' + '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces ' + "progress even when capture=no)", + default="progress", + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@dataclasses.dataclass +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. + + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple fslocation: + File system location of the source of the warning (see ``get_location``). 
+ """ + + message: str + nodeid: Optional[str] = None + fslocation: Optional[Tuple[str, int]] = None + + count_towards_summary: ClassVar = True + + def get_location(self, config: Config) -> Optional[str]: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: Optional[TextIO] = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Optional[Session] = None + self._showfspath: Optional[bool] = None + + self.stats: Dict[str, List[Any]] = {} + self._main_color: Optional[str] = None + self._known_types: Optional[List[str]] = None + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: Union[None, Path, str, int] = None + self.reportchars = getreportopt(config) + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported: Set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write: Optional[float] = None + self._already_displayed_warnings: Optional[int] = None + self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None + + def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]": + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) unless explicitly + # overridden by progress-even-when-capture-no + if ( + self.config.getoption("capture", "no") == "no" + and self.config.getini("console_output_style") + != "progress-even-when-capture-no" + ): + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg == "progress" or cfg == "progress-even-when-capture-no": + return "progress" + elif cfg == "count": + return "count" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.verbosity >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: Optional[bool]) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.verbosity > 0 + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + self._write_progress_information_filling_space() + self.currentfspath = fspath + relfspath 
= bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def wrap_write( + self, + content: str, + *, + flush: bool = False, + margin: int = 8, + line_sep: str = "\n", + **markup: bool, + ) -> None: + """Wrap message with margin for progress info.""" + width_of_current_line = self._tw.width_of_current_line + wrapped = line_sep.join( + textwrap.wrap( + " " * width_of_current_line + content, + width=self._screen_width - margin, + drop_whitespace=True, + replace_whitespace=False, + ), + ) + wrapped = wrapped[width_of_current_line:] + self._tw.write(wrapped, flush=flush, **markup) + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: Union[str, bytes], **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: Optional[str] = None, + fullwidth: Optional[int] = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, + warning_message: warnings.WarningMessage, + nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. 
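For orientation, the rewrite() helper above boils down to a plain carriage-return trick. A minimal standalone sketch (hypothetical names, not part of this diff):

import sys
import time

def rewrite_line(line: str, fullwidth: int = 80) -> None:
    # Pad with spaces so leftovers from a longer previous line are erased,
    # mirroring the erase=True branch of TerminalReporter.rewrite().
    fill = " " * max(fullwidth - len(line) - 1, 0)
    sys.stdout.write("\r" + line + fill)
    sys.stdout.flush()

for n in (100, 1000, 10000):
    rewrite_line(f"collecting ... {n} items")
    time.sleep(0.1)  # pacing only, so the successive updates are visible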
+ self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: Tuple[str, Optional[int], str] + ) -> None: + # Ensure that the path is printed before the + # 1st test of a module starts running. + if self.showlongtestinfo: + line = self._locationline(nodeid, *location) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + + res = TestShortLogReport( + *self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + ) + category, letter, word = res.category, res.letter, res.word + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + running_xdist = hasattr(rep, "node") + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + if self.verbosity <= 0: + self._tw.write(letter, **markup) + else: + self._progress_nodeids_reported.add(rep.nodeid) + line = self._locationline(rep.nodeid, *rep.location) + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + reason = _get_raw_skip_reason(rep) + if self.config.option.verbose < 2: + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + formatted_reason = _format_trimmed( + " ({})", reason, available_width + ) + else: + formatted_reason = f" ({reason})" + + if reason and formatted_reason is not None: + self.wrap_write(formatted_reason) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return len(self._progress_nodeids_reported) == self._session.testscollected + + def pytest_runtest_logfinish(self, nodeid: str) -> None: + assert self._session + if self.verbosity <= 0 and self._show_progress_info: + if self._show_progress_info == "count": + num_tests = self._session.testscollected + progress_length = len(f" [{num_tests}/{num_tests}]") + else: + progress_length = len(" [100%]") + + self._progress_nodeids_reported.add(nodeid) + + if self._is_last_item: + self._write_progress_information_filling_space() + else: + main_color, _ = self._get_main_color() + w = self._width_of_current_line + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = self._progress_nodeids_reported + counter_format = 
f"{{:{len(str(collected))}d}}" + format_string = f" [{counter_format}/{{}}]" + return format_string.format(len(progress), collected) + return f" [ {collected} / {collected} ]" + else: + if collected: + return " [{:3d}%]".format( + len(self._progress_nodeids_reported) * 100 // collected + ) + return " [100%]" + + def _write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + self._collect_report_last_write = timing.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. + t = timing.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - deselected + line = "collected " if final else "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d error%s" % (errors, "s" if errors != 1 else "") + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: "Session") -> None: + self._session = session + self._sessionstarttime = timing.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += ", pytest-{}, pluggy-{}".format( + _pytest._version.version, pluggy.__version__ + ) + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, start_path=self.startpath + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[Union[str, Sequence[str]]] + ) -> None: + for line_or_lines in 
reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> List[str]: + result = [f"rootdir: {config.rootpath}"] + + if config.inipath: + result.append("configfile: " + bestrelpath(config.rootpath, config.inipath)) + + if config.args_source == Config.ArgsSource.TESTPATHS: + testpaths: List[str] = config.getini("testpaths") + result.append("testpaths: {}".format(", ".join(testpaths))) + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) + return result + + def pytest_collection_finish(self, session: "Session") -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, + start_path=self.startpath, + items=session.items, + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + if self.config.option.verbose < 0: + if self.config.option.verbose < -1: + counts = Counter(item.nodeid.split("::", 1)[0] for item in items) + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: List[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if self.config.option.verbose >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(hookwrapper=True) + def pytest_sessionfinish( + self, session: "Session", exitstatus: Union[int, ExitCode] + ): + outcome = yield + outcome.get_result() + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + + @hookimpl(hookwrapper=True) + def pytest_terminal_summary(self) -> Generator[None, None, None]: + self.summary_errors() + self.summary_failures() + self.summary_warnings() + self.summary_passes() + yield + self.short_test_summary() + # Display any extra warnings from teardown here (if any). 
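Since the pytest_terminal_summary implementation above is a hookwrapper, other plugins' implementations of the same hook run at the yield, i.e. after summary_passes() and before short_test_summary(). An illustrative conftest.py hook (a sketch, not part of this diff):

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    # terminalreporter is the TerminalReporter instance defined in this module.
    terminalreporter.write_sep("=", "custom summary")
    passed = len(terminalreporter.stats.get("passed", []))
    terminalreporter.write_line(f"exit status: {exitstatus}, passed: {passed}")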
+ self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline( + self, nodeid: str, fspath: str, lineno: Optional[int], domain: str + ) -> str: + def mkrel(nodeid: str) -> str: + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # collect_fspath comes from testid which has a "/"-normalized path. + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, Path(fspath)) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? + + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. 
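The summary helpers below all read from self.stats. A hypothetical snapshot of its shape for orientation (real values are TestReport/WarningReport objects, and setup/teardown reports are stored under the empty-string key, as noted in _set_main_color()):

stats = {
    "passed": ["<TestReport test_a.py::test_ok>"],
    "failed": ["<TestReport test_a.py::test_boom>"],
    "warnings": ["<WarningReport DeprecationWarning ...>"],
    "": ["<setup/teardown reports>"],
}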
+ # + def getreports(self, name: str): + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: Dict[str, List[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: List[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) + + def summary_passes(self) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt("P"): + reports: List[TestReport] = self.getreports("passed") + if not reports: + return + self.write_sep("=", "PASSES") + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> List[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + if self.config.option.tbstyle != "no": + reports: List[BaseReport] = self.getreports("failed") + if not reports: + return + self.write_sep("=", "FAILURES") + if self.config.option.tbstyle == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + 
reports: List[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = timing.time() - self._sessionstarttime + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = f" in {format_session_duration(session_duration)}" + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg += markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(lines: List[str], *, stat: str) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + config = self.config + for rep in failed: + color = _color_for_type.get(stat, _color_for_type_default) + line = _get_line_with_reprcrash_message( + config, rep, self._tw, {color: True} + ) + lines.append(line) + + def show_xfailed(lines: List[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word = rep._get_verbose_word(self.config) + markup_word = self._tw.markup( + verbose_word, **{_color_for_type["warnings"]: True} + ) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + + lines.append(line) + + def show_xpassed(lines: List[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word = rep._get_verbose_word(self.config) + markup_word = self._tw.markup( + verbose_word, **{_color_for_type["warnings"]: True} + ) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + reason = rep.wasxfail + lines.append(f"{markup_word} {nodeid} {reason}") + + def show_skipped(lines: List[str]) -> None: + skipped: List[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word = skipped[0]._get_verbose_word(self.config) + markup_word = self._tw.markup( + verbose_word, **{_color_for_type["warnings"]: True} + ) + prefix = "Skipped: " + for num, fspath, lineno, reason in 
fskips: + if reason.startswith(prefix): + reason = reason[len(prefix) :] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason)) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, stat="failed"), + "s": show_skipped, + "p": partial(show_simple, stat="passed"), + "E": partial(show_simple, stat="error"), + } + + lines: List[str] = [] + for char in self.reportchars: + action = REPORTCHAR_ACTIONS.get(char) + if action: # skipping e.g. "P" (passed with output) here. + action(lines) + + if lines: + self.write_sep("=", "short test summary info", cyan=True, bold=True) + for line in lines: + self.write_line(line) + + def _get_main_color(self) -> Tuple[str, List[str]]: + if self._main_color is None or self._known_types is None or self._is_last_item: + self._set_main_color() + assert self._main_color + assert self._known_types + return self._main_color, self._known_types + + def _determine_main_color(self, unknown_type_seen: bool) -> str: + stats = self.stats + if "failed" in stats or "error" in stats: + main_color = "red" + elif "warnings" in stats or "xpassed" in stats or unknown_type_seen: + main_color = "yellow" + elif "passed" in stats or not self._is_last_item: + main_color = "green" + else: + main_color = "yellow" + return main_color + + def _set_main_color(self) -> None: + unknown_types: List[str] = [] + for found_type in self.stats.keys(): + if found_type: # setup/teardown reports have an empty key, ignore them + if found_type not in KNOWN_TYPES and found_type not in unknown_types: + unknown_types.append(found_type) + self._known_types = list(KNOWN_TYPES) + unknown_types + self._main_color = self._determine_main_color(bool(unknown_types)) + + def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + """ + Build the parts used in the last summary stats line. + + The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===". + + This function builds a list of the "parts" that make up for the text in that line, in + the example above it would be: + + [ + ("12 passed", {"green": True}), + ("2 errors", {"red": True} + ] + + That last dict for each line is a "markup dictionary", used by TerminalWriter to + color output. + + The final color of the line is also determined by this function, and is the second + element of the returned tuple. 
+ """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> List[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] + + return parts, main_color + + +def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + path, *parts = nodeid.split("::") + if parts: + parts_markup = tw.markup("::".join(parts), bold=True) + return path + "::" + parts_markup + else: + return path + + +def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." + format_width = wcswidth(format.format("")) + if format_width + len(ellipsis) > available_width: + return None + + if format_width + wcswidth(msg) > available_width: + available_width -= len(ellipsis) + msg = msg[:available_width] + while format_width + wcswidth(msg) > available_width: + msg = msg[:-1] + msg += ellipsis + + return format.format(msg) + + +def _get_line_with_reprcrash_message( + config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: Dict[str, bool] +) -> str: + """Get summary line for a report, trying to add reprcrash message.""" + verbose_word = rep._get_verbose_word(config) + word = tw.markup(verbose_word, **word_markup) + node = _get_node_id_with_markup(tw, config, rep) + + line = f"{word} {node}" + line_width = wcswidth(line) + + try: + # Type ignored intentionally -- possible AttributeError expected. 
+ msg = rep.longrepr.reprcrash.message # type: ignore[union-attr] + except AttributeError: + pass + else: + if not running_on_ci(): + available_width = tw.fullwidth - line_width + msg = _format_trimmed(" - {}", msg, available_width) + else: + msg = f" - {msg}" + if msg is not None: + line += msg + + return line + + +def _folded_skips( + startpath: Path, + skipped: Sequence[CollectReport], +) -> List[Tuple[int, str, Optional[int], str]]: + d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {} + for event in skipped: + assert event.longrepr is not None + assert isinstance(event.longrepr, tuple), (event, event.longrepr) + assert len(event.longrepr) == 3, (event, event.longrepr) + fspath, lineno, reason = event.longrepr + # For consistency, report all fspaths in relative form. + fspath = bestrelpath(startpath, Path(fspath)) + keywords = getattr(event, "keywords", {}) + # Folding reports with global pytestmark variable. + # This is a workaround, because for now we cannot identify the scope of a skip marker + # TODO: Revisit after marks scope would be fixed. + if ( + event.when == "setup" + and "skip" in keywords + and "pytestmark" not in keywords + ): + key: Tuple[str, Optional[int], str] = (fspath, None, reason) + else: + key = (fspath, lineno, reason) + d.setdefault(key, []).append(event) + values: List[Tuple[int, str, Optional[int], str]] = [] + for key, events in d.items(): + values.append((len(events), *key)) + return values + + +_color_for_type = { + "failed": "red", + "error": "red", + "warnings": "yellow", + "passed": "green", +} +_color_for_type_default = "yellow" + + +def pluralize(count: int, noun: str) -> Tuple[int, str]: + # No need to pluralize words such as `failed` or `passed`. + if noun not in ["error", "warnings", "test"]: + return count, noun + + # The `warnings` key is plural. To avoid API breakage, we keep it that way but + # set it to singular here so we can determine plurality in the same way as we do + # for `error`. + noun = noun.replace("warnings", "warning") + + return count, noun + "s" if count != 1 else noun + + +def _plugin_nameversions(plugininfo) -> List[str]: + values: List[str] = [] + for plugin, dist in plugininfo: + # Gets us name and version! + name = "{dist.project_name}-{dist.version}".format(dist=dist) + # Questionable convenience, but it keeps things short. + if name.startswith("pytest-"): + name = name[7:] + # We decided to print python package names they can have more than one plugin. + if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. 
+ """ + if hasattr(report, "wasxfail"): + reason = cast(str, report.wasxfail) + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/venv/lib/python3.12/site-packages/_pytest/threadexception.py b/venv/lib/python3.12/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000000..43341e739a --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/threadexception.py @@ -0,0 +1,88 @@ +import threading +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/threading_helper.py, with modifications. +class catch_threading_exception: + """Context manager catching threading.Thread exception using + threading.excepthook. + + Storing exc_value using a custom hook can create a reference cycle. The + reference cycle is broken explicitly when the context manager exits. + + Storing thread using a custom hook can resurrect it if it is set to an + object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with threading_helper.catch_threading_exception() as cm: + # code spawning a thread which raises an exception + ... + # check the thread exception: use cm.args + ... + # cm.args attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.args: Optional["threading.ExceptHookArgs"] = None + self._old_hook: Optional[Callable[["threading.ExceptHookArgs"], Any]] = None + + def _hook(self, args: "threading.ExceptHookArgs") -> None: + self.args = args + + def __enter__(self) -> "catch_threading_exception": + self._old_hook = threading.excepthook + threading.excepthook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + threading.excepthook = self._old_hook + self._old_hook = None + del self.args + + +def thread_exception_runtest_hook() -> Generator[None, None, None]: + with catch_threading_exception() as cm: + yield + if cm.args: + thread_name = "" if cm.args.thread is None else cm.args.thread.name + msg = f"Exception in thread {thread_name}\n\n" + msg += "".join( + traceback.format_exception( + cm.args.exc_type, + cm.args.exc_value, + cm.args.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, trylast=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from thread_exception_runtest_hook() diff --git a/venv/lib/python3.12/site-packages/_pytest/timing.py b/venv/lib/python3.12/site-packages/_pytest/timing.py new file mode 100644 index 0000000000..925163a585 --- /dev/null +++ 
b/venv/lib/python3.12/site-packages/_pytest/timing.py @@ -0,0 +1,12 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. +""" +from time import perf_counter +from time import sleep +from time import time + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/venv/lib/python3.12/site-packages/_pytest/tmpdir.py b/venv/lib/python3.12/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000000..3cc2bace55 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/tmpdir.py @@ -0,0 +1,324 @@ +"""Support for providing temporary directories to test functions.""" +import dataclasses +import os +import re +import tempfile +from pathlib import Path +from shutil import rmtree +from typing import Any +from typing import Dict +from typing import Generator +from typing import Optional +from typing import TYPE_CHECKING +from typing import Union + +from _pytest.nodes import Item +from _pytest.reports import CollectReport +from _pytest.stash import StashKey + +if TYPE_CHECKING: + from typing_extensions import Literal + + RetentionType = Literal["all", "failed", "none"] + + +from _pytest.config.argparsing import Parser + +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from .pathlib import cleanup_dead_symlinks +from _pytest.compat import final, get_user_id +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch + +tmppath_result_key = StashKey[Dict[str, bool]]() + + +@final +@dataclasses.dataclass +class TempPathFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + + _given_basetemp: Optional[Path] + # pluggy TagTracerSub, not currently exposed, so Any. + _trace: Any + _basetemp: Optional[Path] + _retention_count: int + _retention_policy: "RetentionType" + + def __init__( + self, + given_basetemp: Optional[Path], + retention_count: int, + retention_policy: "RetentionType", + trace, + basetemp: Optional[Path] = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._retention_count = retention_count + self._retention_policy = retention_policy + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> "TempPathFactory": + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + count = int(config.getini("tmp_path_retention_count")) + if count < 0: + raise ValueError( + f"tmp_path_retention_count must be >= 0. Current input: {count}." 
+ ) + + policy = config.getini("tmp_path_retention_policy") + if policy not in ("all", "failed", "none"): + raise ValueError( + f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." + ) + + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + retention_count=count, + retention_policy=policy, + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. + """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed. + + :returns: + The base temporary directory. + """ + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath(f"pytest-of-{user}") + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned illegal characters for the platform, use unknown back off mechanism + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + uid = get_user_id() + if uid is not None: + rootdir_stat = rootdir.stat() + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again." 
+ ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + keep = self._retention_count + if self._retention_policy == "none": + keep = 0 + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=keep, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +def get_user() -> Optional[str]: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + try: + # In some exotic environments, getpass may not be importable. + import getpass + + return getpass.getuser() + except (ImportError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "tmp_path_retention_count", + help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", + default=3, + ) + + parser.addini( + "tmp_path_retention_policy", + help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. " + "(all/failed/none)", + default="all", + ) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmp_path( + request: FixtureRequest, tmp_path_factory: TempPathFactory +) -> Generator[Path, None, None]: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. + This behavior can be configured with :confval:`tmp_path_retention_count` and + :confval:`tmp_path_retention_policy`. + If ``--basetemp`` is used then it is cleared each session. See :ref:`base + temporary directory`. + + The returned object is a :class:`pathlib.Path` object. + """ + + path = _mk_tmp(request, tmp_path_factory) + yield path + + # Remove the tmpdir if the policy is "failed" and the test passed. + tmp_path_factory: TempPathFactory = request.session.config._tmp_path_factory # type: ignore + policy = tmp_path_factory._retention_policy + result_dict = request.node.stash[tmppath_result_key] + + if policy == "failed" and result_dict.get("call", True): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. 
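A typical consumer of the tmp_path fixture being defined here, shown for context (standard usage):

def test_create_file(tmp_path):
    # tmp_path is a pathlib.Path unique to this test invocation.
    target = tmp_path / "hello.txt"
    target.write_text("content")
    assert target.read_text() == "content"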
+ rmtree(path, ignore_errors=True) + + del request.node.stash[tmppath_result_key] + + +def pytest_sessionfinish(session, exitstatus: Union[int, ExitCode]): + """After each session, remove base directory if all the tests passed, + the policy is "failed", and the basetemp is not specified by a user. + """ + tmp_path_factory: TempPathFactory = session.config._tmp_path_factory + basetemp = tmp_path_factory._basetemp + if basetemp is None: + return + + policy = tmp_path_factory._retention_policy + if ( + exitstatus == 0 + and policy == "failed" + and tmp_path_factory._given_basetemp is None + ): + if basetemp.is_dir(): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(basetemp, ignore_errors=True) + + # Remove dead symlinks. + if basetemp.is_dir(): + cleanup_dead_symlinks(basetemp) + + +@hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item: Item, call): + outcome = yield + result: CollectReport = outcome.get_result() + + empty: Dict[str, bool] = {} + item.stash.setdefault(tmppath_result_key, empty)[result.when] = result.passed diff --git a/venv/lib/python3.12/site-packages/_pytest/unittest.py b/venv/lib/python3.12/site-packages/_pytest/unittest.py new file mode 100644 index 0000000000..d42a12a3a9 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/unittest.py @@ -0,0 +1,421 @@ +"""Discover and run std-library "unittest" style tests.""" +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +import pytest +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.runner import CallInfo +from _pytest.scope import Scope + +if TYPE_CHECKING: + import unittest + import twisted.trial.unittest + + _SysExcInfoType = Union[ + Tuple[Type[BaseException], BaseException, types.TracebackType], + Tuple[None, None, None], + ] + + +def pytest_pycollect_makeitem( + collector: Union[Module, Class], name: str, obj: object +) -> Optional["UnitTestCase"]: + # Has unittest been imported and is obj a subclass of its TestCase? + try: + ut = sys.modules["unittest"] + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Yes, so let's collect it. + item: UnitTestCase = UnitTestCase.from_parent(collector, name=name, obj=obj) + return item + + +class UnitTestCase(Class): + # Marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs. 
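For context, the kind of stdlib test class that pytest_pycollect_makeitem() above turns into a UnitTestCase collector:

import unittest

class TestExample(unittest.TestCase):
    def setUp(self):
        self.value = 41

    def test_increment(self):
        self.assertEqual(self.value + 1, 42)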
+ nofuncargs = True + + def collect(self) -> Iterable[Union[Item, Collector]]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._inject_setup_teardown_fixtures(cls) + self._inject_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self, unittest=True) + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + funcobj = getimfunc(x) + yield TestCaseFunction.from_parent(self, name=name, callobj=funcobj) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + # Type ignored because `ut` is an opaque module. + if ut is None or runtest != ut.TestCase.runTest: # type: ignore + yield TestCaseFunction.from_parent(self, name="runTest") + + def _inject_setup_teardown_fixtures(self, cls: type) -> None: + """Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding + teardown functions (#517).""" + class_fixture = _make_xunit_fixture( + cls, + "setUpClass", + "tearDownClass", + "doClassCleanups", + scope=Scope.Class, + pass_self=False, + ) + if class_fixture: + cls.__pytest_class_setup = class_fixture # type: ignore[attr-defined] + + method_fixture = _make_xunit_fixture( + cls, + "setup_method", + "teardown_method", + None, + scope=Scope.Function, + pass_self=True, + ) + if method_fixture: + cls.__pytest_method_setup = method_fixture # type: ignore[attr-defined] + + +def _make_xunit_fixture( + obj: type, + setup_name: str, + teardown_name: str, + cleanup_name: Optional[str], + scope: Scope, + pass_self: bool, +): + setup = getattr(obj, setup_name, None) + teardown = getattr(obj, teardown_name, None) + if setup is None and teardown is None: + return None + + if cleanup_name: + cleanup = getattr(obj, cleanup_name, lambda *args: None) + else: + + def cleanup(*args): + pass + + @pytest.fixture( + scope=scope.value, + autouse=True, + # Use a unique name to speed up lookup. + name=f"_unittest_{setup_name}_fixture_{obj.__qualname__}", + ) + def fixture(self, request: FixtureRequest) -> Generator[None, None, None]: + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + if pass_self: + setup(self, request.function) + else: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + if pass_self: + cleanup(self) + else: + cleanup() + + raise + yield + try: + if teardown is not None: + if pass_self: + teardown(self, request.function) + else: + teardown() + finally: + if pass_self: + cleanup(self) + else: + cleanup() + + return fixture + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: Optional[List[_pytest._code.ExceptionInfo[BaseException]]] = None + _testcase: Optional["unittest.TestCase"] = None + + def _getobj(self): + assert self.parent is not None + # Unlike a regular Function in a Class, where `item.obj` returns + # a *bound* method (attached to an instance), TestCaseFunction's + # `obj` returns an *unbound* method (not attached to an instance). + # This inconsistency is probably not desirable, but needs some + # consideration before changing. 
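In plain unittest terms, these are the class-level hooks that the hidden fixture injected by _make_xunit_fixture() above wraps:

import unittest

class TestWithClassSetup(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Wrapped by the hidden class-scoped fixture injected above.
        cls.resource = "expensive-resource"

    @classmethod
    def tearDownClass(cls):
        del cls.resource

    def test_uses_resource(self):
        self.assertEqual(self.resource, "expensive-resource")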
+ return getattr(self.parent.obj, self.originalname) # type: ignore[attr-defined] + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Optional[Callable[[], None]] = None + assert self.parent is not None + self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined] + self._obj = getattr(self._testcase, self.name) + if hasattr(self, "_request"): + self._request._fillfixtures() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._testcase = None + self._obj = None + + def startTest(self, testcase: "unittest.TestCase") -> None: + pass + + def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None: + # Unwrap potential exception info (see twisted trial support below). + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info(rawexcinfo) # type: ignore[arg-type] + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + excinfo.value + excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: "unittest.TestCase", rawexcinfo: "_SysExcInfoType" + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: "unittest.TestCase", reason: str) -> None: + try: + raise pytest.skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: "unittest.TestCase", + rawexcinfo: "_SysExcInfoType", + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: "unittest.TestCase", + reason: Optional["twisted.trial.unittest.Todo"] = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. + try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: "unittest.TestCase") -> None: + pass + + def stopTest(self, testcase: "unittest.TestCase") -> None: + pass + + def addDuration(self, testcase: "unittest.TestCase", elapsed: float) -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + assert self._testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + # Type ignored because self acts as the TestResult, but is not actually one. 
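A minimal standalone illustration (hypothetical names) of the result-object protocol that runtest() below relies on: calling a TestCase with result= drives the add* callbacks that TestCaseFunction implements:

import unittest

class Recorder(unittest.TestResult):
    def addSuccess(self, test):
        super().addSuccess(test)
        print("addSuccess:", test)

class Simple(unittest.TestCase):
    def test_ok(self):
        pass

# Prints something like "addSuccess: test_ok (__main__.Simple.test_ok)".
Simple("test_ok")(result=Recorder())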
+ self._testcase(result=self) # type: ignore[arg-type] + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + # We need to consider if the test itself is skipped, or the whole class. + assert isinstance(self.parent, UnitTestCase) + skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj) + if self.config.getoption("usepdb") and not skipped: + self._explicit_tearDown = self._testcase.tearDown + setattr(self._testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(self._testcase, self.name, self.obj) + try: + self._testcase(result=self) # type: ignore[arg-type] + finally: + delattr(self._testcase, self.name) + + def _traceback_filter( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> _pytest._code.Traceback: + traceback = super()._traceback_filter(excinfo) + ntraceback = traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest"), + ) + if not ntraceback: + ntraceback = traceback + return ntraceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. + unittest = sys.modules.get("unittest") + if ( + unittest + and call.excinfo + and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined] + ): + excinfo = call.excinfo + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +# Twisted trial support. 
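An example that exercises the unittest.SkipTest conversion in pytest_runtest_makereport() above: raising SkipTest from a plain (non-TestCase) test function reaches this hook and is reported as an ordinary skip:

import unittest

def test_skipped_via_unittest():
    # Reported as skipped rather than errored, thanks to the conversion above.
    raise unittest.SkipTest("demonstration only")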
+ + +@hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut: Any = sys.modules["twisted.python.failure"] + Failure__init__ = ut.Failure.__init__ + check_testcase_implements_trial_reporter() + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + yield + ut.Failure.__init__ = Failure__init__ + else: + yield + + +def check_testcase_implements_trial_reporter(done: List[int] = []) -> None: + if done: + return + from zope.interface import classImplements + from twisted.trial.itrial import IReporter + + classImplements(TestCaseFunction, IReporter) + done.append(1) + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) diff --git a/venv/lib/python3.12/site-packages/_pytest/unraisableexception.py b/venv/lib/python3.12/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000000..fcb5d8237c --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,93 @@ +import sys +import traceback +import warnings +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import Optional +from typing import Type + +import pytest + + +# Copied from cpython/Lib/test/support/__init__.py, with modifications. +class catch_unraisable_exception: + """Context manager catching unraisable exception using sys.unraisablehook. + + Storing the exception value (cm.unraisable.exc_value) creates a reference + cycle. The reference cycle is broken explicitly when the context manager + exits. + + Storing the object (cm.unraisable.object) can resurrect it if it is set to + an object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with catch_unraisable_exception() as cm: + # code creating an "unraisable exception" + ... + # check the unraisable exception: use cm.unraisable + ... + # cm.unraisable attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.unraisable: Optional["sys.UnraisableHookArgs"] = None + self._old_hook: Optional[Callable[["sys.UnraisableHookArgs"], Any]] = None + + def _hook(self, unraisable: "sys.UnraisableHookArgs") -> None: + # Storing unraisable.object can resurrect an object which is being + # finalized. Storing unraisable.exc_value creates a reference cycle. 
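+        # Only the most recent unraisable exception is kept: each invocation
+        # of the hook overwrites the previous value.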
+ self.unraisable = unraisable + + def __enter__(self) -> "catch_unraisable_exception": + self._old_hook = sys.unraisablehook + sys.unraisablehook = self._hook + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + assert self._old_hook is not None + sys.unraisablehook = self._old_hook + self._old_hook = None + del self.unraisable + + +def unraisable_exception_runtest_hook() -> Generator[None, None, None]: + with catch_unraisable_exception() as cm: + yield + if cm.unraisable: + if cm.unraisable.err_msg is not None: + err_msg = cm.unraisable.err_msg + else: + err_msg = "Exception ignored in" + msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" + msg += "".join( + traceback.format_exception( + cm.unraisable.exc_type, + cm.unraisable.exc_value, + cm.unraisable.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_setup() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None, None, None]: + yield from unraisable_exception_runtest_hook() diff --git a/venv/lib/python3.12/site-packages/_pytest/warning_types.py b/venv/lib/python3.12/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000000..bd5f418734 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/warning_types.py @@ -0,0 +1,170 @@ +import dataclasses +import inspect +import warnings +from types import FunctionType +from typing import Any +from typing import Generic +from typing import Type +from typing import TypeVar + +from _pytest.compat import final + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future version.""" + + __module__ = "pytest" + + +class PytestRemovedIn8Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 8.""" + + __module__ = "pytest" + + +class PytestReturnNotNoneWarning(PytestRemovedIn8Warning): + """Warning emitted when a test function is returning value other than None.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. 
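+
+    For example (``my_api`` is an illustrative name)::
+
+        warnings.warn(PytestExperimentalApiWarning.simple("my_api"))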
+ """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> "PytestExperimentalApiWarning": + return cls( + "{apiname} is an experimental api that may change over time".format( + apiname=apiname + ) + ) + + +@final +class PytestUnhandledCoroutineWarning(PytestReturnNotNoneWarning): + """Warning emitted for an unhandled coroutine. + + A coroutine was encountered when collecting test functions, but was not + handled by any async-aware plugin. + Coroutine test functions are not natively supported. + """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@dataclasses.dataclass +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category: Type["_W"] + template: str + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) + + +def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None: + """ + Issue the warning :param:`message` for the definition of the given :param:`method` + + this helps to log warnings for functions defined prior to finding an issue with them + (like hook wrappers being marked in a legacy mechanism) + """ + lineno = method.__code__.co_firstlineno + filename = inspect.getfile(method) + module = method.__module__ + mod_globals = method.__globals__ + try: + warnings.warn_explicit( + message, + type(message), + filename=filename, + module=module, + registry=mod_globals.setdefault("__warningregistry__", {}), + lineno=lineno, + ) + except Warning as w: + # If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message. + raise type(w)(f"{w}\n at {filename}:{lineno}") from None diff --git a/venv/lib/python3.12/site-packages/_pytest/warnings.py b/venv/lib/python3.12/site-packages/_pytest/warnings.py new file mode 100644 index 0000000000..4aaa944529 --- /dev/null +++ b/venv/lib/python3.12/site-packages/_pytest/warnings.py @@ -0,0 +1,148 @@ +import sys +import warnings +from contextlib import contextmanager +from typing import Generator +from typing import Optional +from typing import TYPE_CHECKING + +import pytest +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter + +if TYPE_CHECKING: + from typing_extensions import Literal + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. 
" + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: "Literal['config', 'collect', 'runtest']", + item: Optional[Item], +) -> Generator[None, None, None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=True) as log: + # mypy can't infer that record=True means log is not None; help it. + assert log is not None + + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + yield + + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + warn_msg = warning_message.message + msg = warnings.formatwarning( + str(warn_msg), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if warning_message.source is not None: + try: + import tracemalloc + except ImportError: + pass + else: + tb = tracemalloc.get_object_traceback(warning_message.source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + msg += f"\nObject allocated at:\n{formatted_tb}" + else: + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + msg += "Enable tracemalloc to get traceback where the object was allocated.\n" + msg += f"See {url} for more info." 
+ return msg + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=item.config, ihook=item.ihook, when="runtest", item=item + ): + yield + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_collection(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="collect", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_terminal_summary( + terminalreporter: TerminalReporter, +) -> Generator[None, None, None]: + config = terminalreporter.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_sessionfinish(session: Session) -> Generator[None, None, None]: + config = session.config + with catch_warnings_for_item( + config=config, ihook=config.hook, when="config", item=None + ): + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_load_initial_conftests( + early_config: "Config", +) -> Generator[None, None, None]: + with catch_warnings_for_item( + config=early_config, ihook=early_config.hook, when="config", item=None + ): + yield diff --git a/venv/lib/python3.12/site-packages/_watchdog_fsevents.cpython-312-darwin.so b/venv/lib/python3.12/site-packages/_watchdog_fsevents.cpython-312-darwin.so new file mode 100755 index 0000000000..baa4577336 Binary files /dev/null and b/venv/lib/python3.12/site-packages/_watchdog_fsevents.cpython-312-darwin.so differ diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/INSTALLER b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/INSTALLER new file mode 100644 index 0000000000..a1b589e38a --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/METADATA b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/METADATA new file mode 100644 index 0000000000..925c5ff669 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/METADATA @@ -0,0 +1,247 @@ +Metadata-Version: 2.4 +Name: asgiref +Version: 3.9.1 +Summary: ASGI specs, helper code, and adapters +Home-page: https://github.com/django/asgiref/ +Author: Django Software Foundation +Author-email: foundation@djangoproject.com +License: BSD-3-Clause +Project-URL: Documentation, https://asgi.readthedocs.io/ +Project-URL: Further Documentation, https://docs.djangoproject.com/en/stable/topics/async/#async-adapter-functions +Project-URL: Changelog, https://github.com/django/asgiref/blob/master/CHANGELOG.txt +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Python: >=3.9 +License-File: LICENSE +Requires-Dist: typing_extensions>=4; python_version < 
"3.11" +Provides-Extra: tests +Requires-Dist: pytest; extra == "tests" +Requires-Dist: pytest-asyncio; extra == "tests" +Requires-Dist: mypy>=1.14.0; extra == "tests" +Dynamic: license-file + +asgiref +======= + +.. image:: https://github.com/django/asgiref/actions/workflows/tests.yml/badge.svg + :target: https://github.com/django/asgiref/actions/workflows/tests.yml + +.. image:: https://img.shields.io/pypi/v/asgiref.svg + :target: https://pypi.python.org/pypi/asgiref + +ASGI is a standard for Python asynchronous web apps and servers to communicate +with each other, and positioned as an asynchronous successor to WSGI. You can +read more at https://asgi.readthedocs.io/en/latest/ + +This package includes ASGI base libraries, such as: + +* Sync-to-async and async-to-sync function wrappers, ``asgiref.sync`` +* Server base classes, ``asgiref.server`` +* A WSGI-to-ASGI adapter, in ``asgiref.wsgi`` + + +Function wrappers +----------------- + +These allow you to wrap or decorate async or sync functions to call them from +the other style (so you can call async functions from a synchronous thread, +or vice-versa). + +In particular: + +* AsyncToSync lets a synchronous subthread stop and wait while the async + function is called on the main thread's event loop, and then control is + returned to the thread when the async function is finished. + +* SyncToAsync lets async code call a synchronous function, which is run in + a threadpool and control returned to the async coroutine when the synchronous + function completes. + +The idea is to make it easier to call synchronous APIs from async code and +asynchronous APIs from synchronous code so it's easier to transition code from +one style to the other. In the case of Channels, we wrap the (synchronous) +Django view system with SyncToAsync to allow it to run inside the (asynchronous) +ASGI server. + +Note that exactly what threads things run in is very specific, and aimed to +keep maximum compatibility with old synchronous code. See +"Synchronous code & Threads" below for a full explanation. By default, +``sync_to_async`` will run all synchronous code in the program in the same +thread for safety reasons; you can disable this for more performance with +``@sync_to_async(thread_sensitive=False)``, but make sure that your code does +not rely on anything bound to threads (like database connections) when you do. + + +Threadlocal replacement +----------------------- + +This is a drop-in replacement for ``threading.local`` that works with both +threads and asyncio Tasks. Even better, it will proxy values through from a +task-local context to a thread-local context when you use ``sync_to_async`` +to run things in a threadpool, and vice-versa for ``async_to_sync``. + +If you instead want true thread- and task-safety, you can set +``thread_critical`` on the Local object to ensure this instead. + + +Server base classes +------------------- + +Includes a ``StatelessServer`` class which provides all the hard work of +writing a stateless server (as in, does not handle direct incoming sockets +but instead consumes external streams or sockets to work out what is happening). + +An example of such a server would be a chatbot server that connects out to +a central chat server and provides a "connection scope" per user chatting to +it. There's only one actual connection, but the server has to separate things +into several scopes for easier writing of the code. + +You can see an example of this being used in `frequensgi `_. 
+ + +WSGI-to-ASGI adapter +-------------------- + +Allows you to wrap a WSGI application so it appears as a valid ASGI application. + +Simply wrap it around your WSGI application like so:: + + asgi_application = WsgiToAsgi(wsgi_application) + +The WSGI application will be run in a synchronous threadpool, and the wrapped +ASGI application will be one that accepts ``http`` class messages. + +Please note that not all extended features of WSGI may be supported (such as +file handles for incoming POST bodies). + + +Dependencies +------------ + +``asgiref`` requires Python 3.9 or higher. + + +Contributing +------------ + +Please refer to the +`main Channels contributing docs `_. + + +Testing +''''''' + +To run tests, make sure you have installed the ``tests`` extra with the package:: + + cd asgiref/ + pip install -e .[tests] + pytest + + +Building the documentation +'''''''''''''''''''''''''' + +The documentation uses `Sphinx `_:: + + cd asgiref/docs/ + pip install sphinx + +To build the docs, you can use the default tools:: + + sphinx-build -b html . _build/html # or `make html`, if you've got make set up + cd _build/html + python -m http.server + +...or you can use ``sphinx-autobuild`` to run a server and rebuild/reload +your documentation changes automatically:: + + pip install sphinx-autobuild + sphinx-autobuild . _build/html + + +Releasing +''''''''' + +To release, first add details to CHANGELOG.txt and update the version number in ``asgiref/__init__.py``. + +Then, build and push the packages:: + + python -m build + twine upload dist/* + rm -r asgiref.egg-info dist + + +Implementation Details +---------------------- + +Synchronous code & threads +'''''''''''''''''''''''''' + +The ``asgiref.sync`` module provides two wrappers that let you go between +asynchronous and synchronous code at will, while taking care of the rough edges +for you. + +Unfortunately, the rough edges are numerous, and the code has to work especially +hard to keep things in the same thread as much as possible. Notably, the +restrictions we are working with are: + +* All synchronous code called through ``SyncToAsync`` and marked with + ``thread_sensitive`` should run in the same thread as each other (and if the + outer layer of the program is synchronous, the main thread) + +* If a thread already has a running async loop, ``AsyncToSync`` can't run things + on that loop if it's blocked on synchronous code that is above you in the + call stack. + +The first compromise you get to might be that ``thread_sensitive`` code should +just run in the same thread and not spawn in a sub-thread, fulfilling the first +restriction, but that immediately runs you into the second restriction. + +The only real solution is to essentially have a variant of ThreadPoolExecutor +that executes any ``thread_sensitive`` code on the outermost synchronous +thread - either the main thread, or a single spawned subthread. + +This means you now have two basic states: + +* If the outermost layer of your program is synchronous, then all async code + run through ``AsyncToSync`` will run in a per-call event loop in arbitrary + sub-threads, while all ``thread_sensitive`` code will run in the main thread. + +* If the outermost layer of your program is asynchronous, then all async code + runs on the main thread's event loop, and all ``thread_sensitive`` synchronous + code will run in a single shared sub-thread. 
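+
+A minimal sketch of the first state, using only the public wrappers::
+
+    from asgiref.sync import async_to_sync, sync_to_async
+
+    def sync_work():
+        return "done"  # thread_sensitive, so this runs on the main thread
+
+    async def async_work():
+        return await sync_to_async(sync_work)()
+
+    result = async_to_sync(async_work)()  # synchronous outermost layer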
+ +Crucially, this means that in both cases there is a thread which is a shared +resource that all ``thread_sensitive`` code must run on, and there is a chance +that this thread is currently blocked on its own ``AsyncToSync`` call. Thus, +``AsyncToSync`` needs to act as an executor for thread code while it's blocking. + +The ``CurrentThreadExecutor`` class provides this functionality; rather than +simply waiting on a Future, you can call its ``run_until_future`` method and +it will run submitted code until that Future is done. This means that code +inside the call can then run code on your thread. + + +Maintenance and Security +------------------------ + +To report security issues, please contact security@djangoproject.com. For GPG +signatures and more security process information, see +https://docs.djangoproject.com/en/dev/internals/security/. + +To report bugs or request new features, please open a new GitHub issue. + +This repository is part of the Channels project. For the shepherd and maintenance team, please see the +`main Channels readme `_. diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/RECORD b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/RECORD new file mode 100644 index 0000000000..02e66ec572 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/RECORD @@ -0,0 +1,27 @@ +asgiref-3.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +asgiref-3.9.1.dist-info/METADATA,sha256=FI1qnkAdT303WxkQlnpXEZ49dO1z-srL9L-_7uYjqTk,9286 +asgiref-3.9.1.dist-info/RECORD,, +asgiref-3.9.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +asgiref-3.9.1.dist-info/licenses/LICENSE,sha256=uEZBXRtRTpwd_xSiLeuQbXlLxUbKYSn5UKGM0JHipmk,1552 +asgiref-3.9.1.dist-info/top_level.txt,sha256=bokQjCzwwERhdBiPdvYEZa4cHxT4NCeAffQNUqJ8ssg,8 +asgiref/__init__.py,sha256=97OdSef30we1CjsM0qA6wEMk67472iE4_gOoam4odOg,22 +asgiref/__pycache__/__init__.cpython-312.pyc,, +asgiref/__pycache__/compatibility.cpython-312.pyc,, +asgiref/__pycache__/current_thread_executor.cpython-312.pyc,, +asgiref/__pycache__/local.cpython-312.pyc,, +asgiref/__pycache__/server.cpython-312.pyc,, +asgiref/__pycache__/sync.cpython-312.pyc,, +asgiref/__pycache__/testing.cpython-312.pyc,, +asgiref/__pycache__/timeout.cpython-312.pyc,, +asgiref/__pycache__/typing.cpython-312.pyc,, +asgiref/__pycache__/wsgi.cpython-312.pyc,, +asgiref/compatibility.py,sha256=DhY1SOpOvOw0Y1lSEjCqg-znRUQKecG3LTaV48MZi68,1606 +asgiref/current_thread_executor.py,sha256=42CU1VODLTk-_PYise-cP1XgyAvI5Djc8f97owFzdrs,4157 +asgiref/local.py,sha256=ZZeWWIXptVU4GbNApMMWQ-skuglvodcQA5WpzJDMxh4,4912 +asgiref/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +asgiref/server.py,sha256=3A68169Nuh2sTY_2O5JzRd_opKObWvvrEFcrXssq3kA,6311 +asgiref/sync.py,sha256=5dlK0T61pMSNWf--49nUojn0mfduqq6ryuwyXxv2Y-A,20417 +asgiref/testing.py,sha256=U5wcs_-ZYTO5SIGfl80EqRAGv_T8BHrAhvAKRuuztT4,4421 +asgiref/timeout.py,sha256=LtGL-xQpG8JHprdsEUCMErJ0kNWj4qwWZhEHJ3iKu4s,3627 +asgiref/typing.py,sha256=Zi72AZlOyF1C7N14LLZnpAdfUH4ljoBqFdQo_bBKMq0,6290 +asgiref/wsgi.py,sha256=fxBLgUE_0PEVgcp13ticz6GHf3q-aKWcB5eFPhd6yxo,6753 diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/WHEEL b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/WHEEL new file mode 100644 index 0000000000..e7fa31b6f3 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: 
py3-none-any + diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/licenses/LICENSE b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000..5f4f225dd2 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/licenses/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) Django Software Foundation and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of Django nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/top_level.txt b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/top_level.txt new file mode 100644 index 0000000000..ddf99d3d4f --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref-3.9.1.dist-info/top_level.txt @@ -0,0 +1 @@ +asgiref diff --git a/venv/lib/python3.12/site-packages/asgiref/__init__.py b/venv/lib/python3.12/site-packages/asgiref/__init__.py new file mode 100644 index 0000000000..3e9a639238 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/__init__.py @@ -0,0 +1 @@ +__version__ = "3.9.1" diff --git a/venv/lib/python3.12/site-packages/asgiref/compatibility.py b/venv/lib/python3.12/site-packages/asgiref/compatibility.py new file mode 100644 index 0000000000..3a2a63e6ec --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/compatibility.py @@ -0,0 +1,48 @@ +import inspect + +from .sync import iscoroutinefunction + + +def is_double_callable(application): + """ + Tests to see if an application is a legacy-style (double-callable) application. + """ + # Look for a hint on the object first + if getattr(application, "_asgi_single_callable", False): + return False + if getattr(application, "_asgi_double_callable", False): + return True + # Uninstanted classes are double-callable + if inspect.isclass(application): + return True + # Instanted classes depend on their __call__ + if hasattr(application, "__call__"): + # We only check to see if its __call__ is a coroutine function - + # if it's not, it still might be a coroutine function itself. 
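+        # An async __call__ marks the instance as single-callable; a plain
+        # sync __call__ falls through to the direct check below.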
+ if iscoroutinefunction(application.__call__): + return False + # Non-classes we just check directly + return not iscoroutinefunction(application) + + +def double_to_single_callable(application): + """ + Transforms a double-callable ASGI application into a single-callable one. + """ + + async def new_application(scope, receive, send): + instance = application(scope) + return await instance(receive, send) + + return new_application + + +def guarantee_single_callable(application): + """ + Takes either a single- or double-callable application and always returns it + in single-callable style. Use this to add backwards compatibility for ASGI + 2.0 applications to your server/test harness/etc. + """ + if is_double_callable(application): + application = double_to_single_callable(application) + return application diff --git a/venv/lib/python3.12/site-packages/asgiref/current_thread_executor.py b/venv/lib/python3.12/site-packages/asgiref/current_thread_executor.py new file mode 100644 index 0000000000..6293cbd7f2 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/current_thread_executor.py @@ -0,0 +1,123 @@ +import sys +import threading +from collections import deque +from concurrent.futures import Executor, Future +from typing import Any, Callable, TypeVar + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +_T = TypeVar("_T") +_P = ParamSpec("_P") +_R = TypeVar("_R") + + +class _WorkItem: + """ + Represents an item needing to be run in the executor. + Copied from ThreadPoolExecutor (but it's private, so we're not going to rely on importing it) + """ + + def __init__( + self, + future: "Future[_R]", + fn: Callable[_P, _R], + *args: _P.args, + **kwargs: _P.kwargs, + ): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self) -> None: + __traceback_hide__ = True # noqa: F841 + if not self.future.set_running_or_notify_cancel(): + return + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException as exc: + self.future.set_exception(exc) + # Break a reference cycle with the exception 'exc' + self = None # type: ignore[assignment] + else: + self.future.set_result(result) + + +class CurrentThreadExecutor(Executor): + """ + An Executor that actually runs code in the thread it is instantiated in. + Passed to other threads running async code, so they can run sync code in + the thread they came from. + """ + + def __init__(self, old_executor: "CurrentThreadExecutor | None") -> None: + self._work_thread = threading.current_thread() + self._work_ready = threading.Condition(threading.Lock()) + self._work_items = deque[_WorkItem]() # synchronized by _work_ready + self._broken = False # synchronized by _work_ready + self._old_executor = old_executor + + def run_until_future(self, future: "Future[Any]") -> None: + """ + Runs the code in the work queue until a result is available from the future. + Should be run from the thread the executor is initialised in. + """ + # Check we're in the right thread + if threading.current_thread() != self._work_thread: + raise RuntimeError( + "You cannot run CurrentThreadExecutor from a different thread" + ) + + def done(future: "Future[Any]") -> None: + with self._work_ready: + self._broken = True + self._work_ready.notify() + + future.add_done_callback(done) + # Keep getting and running work items until the future we're waiting for + # is done and the queue is empty. 
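+        # ``_broken`` is set by the done() callback above, which also
+        # notifies the condition, so this loop wakes up, drains any queued
+        # work items, and then exits.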
+ while True: + with self._work_ready: + while not self._work_items and not self._broken: + self._work_ready.wait() + if not self._work_items: + break + # Get a work item and run it + work_item = self._work_items.popleft() + work_item.run() + del work_item + + def submit( + self, + fn: Callable[_P, _R], + /, + *args: _P.args, + **kwargs: _P.kwargs, + ) -> "Future[_R]": + # Check they're not submitting from the same thread + if threading.current_thread() == self._work_thread: + raise RuntimeError( + "You cannot submit onto CurrentThreadExecutor from its own thread" + ) + f: "Future[_R]" = Future() + work_item = _WorkItem(f, fn, *args, **kwargs) + + # Walk up the CurrentThreadExecutor stack to find the closest one still + # running + executor = self + while True: + with executor._work_ready: + if not executor._broken: + # Add to work queue + executor._work_items.append(work_item) + executor._work_ready.notify() + break + if executor._old_executor is None: + raise RuntimeError("CurrentThreadExecutor already quit or is broken") + executor = executor._old_executor + + # Return the future + return f diff --git a/venv/lib/python3.12/site-packages/asgiref/local.py b/venv/lib/python3.12/site-packages/asgiref/local.py new file mode 100644 index 0000000000..845313a81d --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/local.py @@ -0,0 +1,131 @@ +import asyncio +import contextlib +import contextvars +import threading +from typing import Any, Dict, Union + + +class _CVar: + """Storage utility for Local.""" + + def __init__(self) -> None: + self._data: "contextvars.ContextVar[Dict[str, Any]]" = contextvars.ContextVar( + "asgiref.local" + ) + + def __getattr__(self, key): + storage_object = self._data.get({}) + try: + return storage_object[key] + except KeyError: + raise AttributeError(f"{self!r} object has no attribute {key!r}") + + def __setattr__(self, key: str, value: Any) -> None: + if key == "_data": + return super().__setattr__(key, value) + + storage_object = self._data.get({}).copy() + storage_object[key] = value + self._data.set(storage_object) + + def __delattr__(self, key: str) -> None: + storage_object = self._data.get({}).copy() + if key in storage_object: + del storage_object[key] + self._data.set(storage_object) + else: + raise AttributeError(f"{self!r} object has no attribute {key!r}") + + +class Local: + """Local storage for async tasks. + + This is a namespace object (similar to `threading.local`) where data is + also local to the current async task (if there is one). + + In async threads, local means in the same sense as the `contextvars` + module - i.e. a value set in an async frame will be visible: + + - to other async code `await`-ed from this frame. + - to tasks spawned using `asyncio` utilities (`create_task`, `wait_for`, + `gather` and probably others). + - to code scheduled in a sync thread using `sync_to_async` + + In "sync" threads (a thread with no async event loop running), the + data is thread-local, but additionally shared with async code executed + via the `async_to_sync` utility, which schedules async code in a new thread + and copies context across to that thread. + + If `thread_critical` is True, then the local will only be visible per-thread, + behaving exactly like `threading.local` if the thread is sync, and as + `contextvars` if the thread is async. This allows genuinely thread-sensitive + code (such as DB handles) to be kept stricly to their initial thread and + disable the sharing across `sync_to_async` and `async_to_sync` wrapped calls. 
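+
+    Usage mirrors `threading.local` (the values here are illustrative)::
+
+        state = Local()
+        state.request_id = "abc123"   # set in this task/thread ...
+        print(state.request_id)       # ... and visible in awaited frames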
+ + Unlike plain `contextvars` objects, this utility is threadsafe. + """ + + def __init__(self, thread_critical: bool = False) -> None: + self._thread_critical = thread_critical + self._thread_lock = threading.RLock() + + self._storage: "Union[threading.local, _CVar]" + + if thread_critical: + # Thread-local storage + self._storage = threading.local() + else: + # Contextvar storage + self._storage = _CVar() + + @contextlib.contextmanager + def _lock_storage(self): + # Thread safe access to storage + if self._thread_critical: + is_async = True + try: + # this is a test for are we in a async or sync + # thread - will raise RuntimeError if there is + # no current loop + asyncio.get_running_loop() + except RuntimeError: + is_async = False + if not is_async: + # We are in a sync thread, the storage is + # just the plain thread local (i.e, "global within + # this thread" - it doesn't matter where you are + # in a call stack you see the same storage) + yield self._storage + else: + # We are in an async thread - storage is still + # local to this thread, but additionally should + # behave like a context var (is only visible with + # the same async call stack) + + # Ensure context exists in the current thread + if not hasattr(self._storage, "cvar"): + self._storage.cvar = _CVar() + + # self._storage is a thread local, so the members + # can't be accessed in another thread (we don't + # need any locks) + yield self._storage.cvar + else: + # Lock for thread_critical=False as other threads + # can access the exact same storage object + with self._thread_lock: + yield self._storage + + def __getattr__(self, key): + with self._lock_storage() as storage: + return getattr(storage, key) + + def __setattr__(self, key, value): + if key in ("_local", "_storage", "_thread_critical", "_thread_lock"): + return super().__setattr__(key, value) + with self._lock_storage() as storage: + setattr(storage, key, value) + + def __delattr__(self, key): + with self._lock_storage() as storage: + delattr(storage, key) diff --git a/venv/lib/python3.12/site-packages/asgiref/py.typed b/venv/lib/python3.12/site-packages/asgiref/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/venv/lib/python3.12/site-packages/asgiref/server.py b/venv/lib/python3.12/site-packages/asgiref/server.py new file mode 100644 index 0000000000..131ea4305d --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/server.py @@ -0,0 +1,173 @@ +import asyncio +import logging +import time +import traceback + +from .compatibility import guarantee_single_callable + +logger = logging.getLogger(__name__) + + +class StatelessServer: + """ + Base server class that handles basic concepts like application instance + creation/pooling, exception handling, and similar, for stateless protocols + (i.e. ones without actual incoming connections to the process) + + Your code should override the handle() method, doing whatever it needs to, + and calling get_or_create_application_instance with a unique `scope_id` + and `scope` for the scope it wants to get. + + If an application instance is found with the same `scope_id`, you are + given its input queue, otherwise one is made for you with the scope provided + and you are given that fresh new input queue. 
Either way, you should do + something like: + + input_queue = self.get_or_create_application_instance( + "user-123456", + {"type": "testprotocol", "user_id": "123456", "username": "andrew"}, + ) + input_queue.put_nowait(message) + + If you try and create an application instance and there are already + `max_application` instances, the oldest/least recently used one will be + reclaimed and shut down to make space. + + Application coroutines that error will be found periodically (every 100ms + by default) and have their exceptions printed to the console. Override + application_exception() if you want to do more when this happens. + + If you override run(), make sure you handle things like launching the + application checker. + """ + + application_checker_interval = 0.1 + + def __init__(self, application, max_applications=1000): + # Parameters + self.application = application + self.max_applications = max_applications + # Initialisation + self.application_instances = {} + + ### Mainloop and handling + + def run(self): + """ + Runs the asyncio event loop with our handler loop. + """ + event_loop = asyncio.get_event_loop() + try: + event_loop.run_until_complete(self.arun()) + except KeyboardInterrupt: + logger.info("Exiting due to Ctrl-C/interrupt") + + async def arun(self): + """ + Runs the asyncio event loop with our handler loop. + """ + + class Done(Exception): + pass + + async def handle(): + await self.handle() + raise Done + + try: + await asyncio.gather(self.application_checker(), handle()) + except Done: + pass + + async def handle(self): + raise NotImplementedError("You must implement handle()") + + async def application_send(self, scope, message): + """ + Receives outbound sends from applications and handles them. + """ + raise NotImplementedError("You must implement application_send()") + + ### Application instance management + + def get_or_create_application_instance(self, scope_id, scope): + """ + Creates an application instance and returns its queue. 
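+        If an instance already exists for ``scope_id``, its ``last_used``
+        timestamp is refreshed and its existing queue is returned instead.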
+ """ + if scope_id in self.application_instances: + self.application_instances[scope_id]["last_used"] = time.time() + return self.application_instances[scope_id]["input_queue"] + # See if we need to delete an old one + while len(self.application_instances) > self.max_applications: + self.delete_oldest_application_instance() + # Make an instance of the application + input_queue = asyncio.Queue() + application_instance = guarantee_single_callable(self.application) + # Run it, and stash the future for later checking + future = asyncio.ensure_future( + application_instance( + scope=scope, + receive=input_queue.get, + send=lambda message: self.application_send(scope, message), + ), + ) + self.application_instances[scope_id] = { + "input_queue": input_queue, + "future": future, + "scope": scope, + "last_used": time.time(), + } + return input_queue + + def delete_oldest_application_instance(self): + """ + Finds and deletes the oldest application instance + """ + oldest_time = min( + details["last_used"] for details in self.application_instances.values() + ) + for scope_id, details in self.application_instances.items(): + if details["last_used"] == oldest_time: + self.delete_application_instance(scope_id) + # Return to make sure we only delete one in case two have + # the same oldest time + return + + def delete_application_instance(self, scope_id): + """ + Removes an application instance (makes sure its task is stopped, + then removes it from the current set) + """ + details = self.application_instances[scope_id] + del self.application_instances[scope_id] + if not details["future"].done(): + details["future"].cancel() + + async def application_checker(self): + """ + Goes through the set of current application instance Futures and cleans up + any that are done/prints exceptions for any that errored. + """ + while True: + await asyncio.sleep(self.application_checker_interval) + for scope_id, details in list(self.application_instances.items()): + if details["future"].done(): + exception = details["future"].exception() + if exception: + await self.application_exception(exception, details) + try: + del self.application_instances[scope_id] + except KeyError: + # Exception handling might have already got here before us. That's fine. + pass + + async def application_exception(self, exception, application_details): + """ + Called whenever an application coroutine has an exception. 
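+
+        The default implementation logs the exception and its traceback;
+        override this to report errors elsewhere.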
+ """ + logging.error( + "Exception inside application: %s\n%s%s", + exception, + "".join(traceback.format_tb(exception.__traceback__)), + f" {exception}", + ) diff --git a/venv/lib/python3.12/site-packages/asgiref/sync.py b/venv/lib/python3.12/site-packages/asgiref/sync.py new file mode 100644 index 0000000000..54496efec5 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/sync.py @@ -0,0 +1,584 @@ +import asyncio +import asyncio.coroutines +import contextvars +import functools +import inspect +import os +import sys +import threading +import warnings +import weakref +from concurrent.futures import Future, ThreadPoolExecutor +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Coroutine, + Dict, + Generic, + List, + Optional, + TypeVar, + Union, + overload, +) + +from .current_thread_executor import CurrentThreadExecutor +from .local import Local + +if sys.version_info >= (3, 10): + from typing import ParamSpec +else: + from typing_extensions import ParamSpec + +if TYPE_CHECKING: + # This is not available to import at runtime + from _typeshed import OptExcInfo + +_F = TypeVar("_F", bound=Callable[..., Any]) +_P = ParamSpec("_P") +_R = TypeVar("_R") + + +def _restore_context(context: contextvars.Context) -> None: + # Check for changes in contextvars, and set them to the current + # context for downstream consumers + for cvar in context: + cvalue = context.get(cvar) + try: + if cvar.get() != cvalue: + cvar.set(cvalue) + except LookupError: + cvar.set(cvalue) + + +# Python 3.12 deprecates asyncio.iscoroutinefunction() as an alias for +# inspect.iscoroutinefunction(), whilst also removing the _is_coroutine marker. +# The latter is replaced with the inspect.markcoroutinefunction decorator. +# Until 3.12 is the minimum supported Python version, provide a shim. + +if hasattr(inspect, "markcoroutinefunction"): + iscoroutinefunction = inspect.iscoroutinefunction + markcoroutinefunction: Callable[[_F], _F] = inspect.markcoroutinefunction +else: + iscoroutinefunction = asyncio.iscoroutinefunction # type: ignore[assignment] + + def markcoroutinefunction(func: _F) -> _F: + func._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore + return func + + +class ThreadSensitiveContext: + """Async context manager to manage context for thread sensitive mode + + This context manager controls which thread pool executor is used when in + thread sensitive mode. By default, a single thread pool executor is shared + within a process. + + The ThreadSensitiveContext() context manager may be used to specify a + thread pool per context. + + This context manager is re-entrant, so only the outer-most call to + ThreadSensitiveContext will set the context. + + Usage: + + >>> import time + >>> async with ThreadSensitiveContext(): + ... await sync_to_async(time.sleep, 1)() + """ + + def __init__(self): + self.token = None + + async def __aenter__(self): + try: + SyncToAsync.thread_sensitive_context.get() + except LookupError: + self.token = SyncToAsync.thread_sensitive_context.set(self) + + return self + + async def __aexit__(self, exc, value, tb): + if not self.token: + return + + executor = SyncToAsync.context_to_thread_executor.pop(self, None) + if executor: + executor.shutdown() + SyncToAsync.thread_sensitive_context.reset(self.token) + + +class AsyncToSync(Generic[_P, _R]): + """ + Utility class which turns an awaitable that only works on the thread with + the event loop into a synchronous callable that works in a subthread. 
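+
+    (Most code reaches this class through the ``async_to_sync`` helper at
+    the bottom of this module rather than by instantiating it directly.)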
+ + If the call stack contains an async loop, the code runs there. + Otherwise, the code runs in a new loop in a new thread. + + Either way, this thread then pauses and waits to run any thread_sensitive + code called from further down the call stack using SyncToAsync, before + finally exiting once the async task returns. + """ + + # Keeps a reference to the CurrentThreadExecutor in local context, so that + # any sync_to_async inside the wrapped code can find it. + executors: "Local" = Local() + + # When we can't find a CurrentThreadExecutor from the context, such as + # inside create_task, we'll look it up here from the running event loop. + loop_thread_executors: "Dict[asyncio.AbstractEventLoop, CurrentThreadExecutor]" = {} + + def __init__( + self, + awaitable: Union[ + Callable[_P, Coroutine[Any, Any, _R]], + Callable[_P, Awaitable[_R]], + ], + force_new_loop: bool = False, + ): + if not callable(awaitable) or ( + not iscoroutinefunction(awaitable) + and not iscoroutinefunction(getattr(awaitable, "__call__", awaitable)) + ): + # Python does not have very reliable detection of async functions + # (lots of false negatives) so this is just a warning. + warnings.warn( + "async_to_sync was passed a non-async-marked callable", stacklevel=2 + ) + self.awaitable = awaitable + try: + self.__self__ = self.awaitable.__self__ # type: ignore[union-attr] + except AttributeError: + pass + self.force_new_loop = force_new_loop + self.main_event_loop = None + try: + self.main_event_loop = asyncio.get_running_loop() + except RuntimeError: + # There's no event loop in this thread. + pass + + def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R: + __traceback_hide__ = True # noqa: F841 + + if not self.force_new_loop and not self.main_event_loop: + # There's no event loop in this thread. Look for the threadlocal if + # we're inside SyncToAsync + main_event_loop_pid = getattr( + SyncToAsync.threadlocal, "main_event_loop_pid", None + ) + # We make sure the parent loop is from the same process - if + # they've forked, this is not going to be valid any more (#194) + if main_event_loop_pid and main_event_loop_pid == os.getpid(): + self.main_event_loop = getattr( + SyncToAsync.threadlocal, "main_event_loop", None + ) + + # You can't call AsyncToSync from a thread with a running event loop + try: + asyncio.get_running_loop() + except RuntimeError: + pass + else: + raise RuntimeError( + "You cannot use AsyncToSync in the same thread as an async event loop - " + "just await the async function directly." + ) + + # Make a future for the return information + call_result: "Future[_R]" = Future() + + # Make a CurrentThreadExecutor we'll use to idle in this thread - we + # need one for every sync frame, even if there's one above us in the + # same thread. + old_executor = getattr(self.executors, "current", None) + current_executor = CurrentThreadExecutor(old_executor) + self.executors.current = current_executor + + # Wrapping context in list so it can be reassigned from within + # `main_wrap`. + context = [contextvars.copy_context()] + + # Get task context so that parent task knows which task to propagate + # an asyncio.CancelledError to. + task_context = getattr(SyncToAsync.threadlocal, "task_context", None) + + # Use call_soon_threadsafe to schedule a synchronous callback on the + # main event loop's thread if it's there, otherwise make a new loop + # in this thread. 
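+        # Two possible homes for the coroutine: the pre-existing main event
+        # loop, scheduled via call_soon_threadsafe, or a fresh loop run by a
+        # one-shot worker thread.  Either way this thread idles in
+        # current_executor so it can service thread_sensitive work submitted
+        # back from the async side.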
+ try: + awaitable = self.main_wrap( + call_result, + sys.exc_info(), + task_context, + context, + # prepare an awaitable which can be passed as is to self.main_wrap, + # so that `args` and `kwargs` don't need to be + # destructured when passed to self.main_wrap + # (which is required by `ParamSpec`) + # as that may cause overlapping arguments + self.awaitable(*args, **kwargs), + ) + + async def new_loop_wrap() -> None: + loop = asyncio.get_running_loop() + self.loop_thread_executors[loop] = current_executor + try: + await awaitable + finally: + del self.loop_thread_executors[loop] + + if self.main_event_loop is not None: + try: + self.main_event_loop.call_soon_threadsafe( + self.main_event_loop.create_task, awaitable + ) + except RuntimeError: + running_in_main_event_loop = False + else: + running_in_main_event_loop = True + # Run the CurrentThreadExecutor until the future is done. + current_executor.run_until_future(call_result) + else: + running_in_main_event_loop = False + + if not running_in_main_event_loop: + # Make our own event loop - in a new thread - and run inside that. + loop_executor = ThreadPoolExecutor(max_workers=1) + loop_future = loop_executor.submit(asyncio.run, new_loop_wrap()) + # Run the CurrentThreadExecutor until the future is done. + current_executor.run_until_future(loop_future) + # Wait for future and/or allow for exception propagation + loop_future.result() + finally: + _restore_context(context[0]) + # Restore old current thread executor state + self.executors.current = old_executor + + # Wait for results from the future. + return call_result.result() + + def __get__(self, parent: Any, objtype: Any) -> Callable[_P, _R]: + """ + Include self for methods + """ + func = functools.partial(self.__call__, parent) + return functools.update_wrapper(func, self.awaitable) + + async def main_wrap( + self, + call_result: "Future[_R]", + exc_info: "OptExcInfo", + task_context: "Optional[List[asyncio.Task[Any]]]", + context: List[contextvars.Context], + awaitable: Union[Coroutine[Any, Any, _R], Awaitable[_R]], + ) -> None: + """ + Wraps the awaitable with something that puts the result into the + result/exception future. + """ + + __traceback_hide__ = True # noqa: F841 + + if context is not None: + _restore_context(context[0]) + + current_task = asyncio.current_task() + if current_task is not None and task_context is not None: + task_context.append(current_task) + + try: + # If we have an exception, run the function inside the except block + # after raising it so exc_info is correctly populated. + if exc_info[1]: + try: + raise exc_info[1] + except BaseException: + result = await awaitable + else: + result = await awaitable + except BaseException as e: + call_result.set_exception(e) + else: + call_result.set_result(result) + finally: + if current_task is not None and task_context is not None: + task_context.remove(current_task) + context[0] = contextvars.copy_context() + + +class SyncToAsync(Generic[_P, _R]): + """ + Utility class which turns a synchronous callable into an awaitable that + runs in a threadpool. It also sets a threadlocal inside the thread so + calls to AsyncToSync can escape it. + + If thread_sensitive is passed, the code will run in the same thread as any + outer code. This is needed for underlying Python code that is not + threadsafe (for example, code which handles SQLite database connections). + + If the outermost program is async (i.e. 
SyncToAsync is outermost), then + this will be a dedicated single sub-thread that all sync code runs in, + one after the other. If the outermost program is sync (i.e. AsyncToSync is + outermost), this will just be the main thread. This is achieved by idling + with a CurrentThreadExecutor while AsyncToSync is blocking its sync parent, + rather than just blocking. + + If executor is passed in, that will be used instead of the loop's default executor. + In order to pass in an executor, thread_sensitive must be set to False, otherwise + a TypeError will be raised. + """ + + # Storage for main event loop references + threadlocal = threading.local() + + # Single-thread executor for thread-sensitive code + single_thread_executor = ThreadPoolExecutor(max_workers=1) + + # Maintain a contextvar for the current execution context. Optionally used + # for thread sensitive mode. + thread_sensitive_context: "contextvars.ContextVar[ThreadSensitiveContext]" = ( + contextvars.ContextVar("thread_sensitive_context") + ) + + # Contextvar that is used to detect if the single thread executor + # would be awaited on while already being used in the same context + deadlock_context: "contextvars.ContextVar[bool]" = contextvars.ContextVar( + "deadlock_context" + ) + + # Maintaining a weak reference to the context ensures that thread pools are + # erased once the context goes out of scope. This terminates the thread pool. + context_to_thread_executor: "weakref.WeakKeyDictionary[ThreadSensitiveContext, ThreadPoolExecutor]" = ( + weakref.WeakKeyDictionary() + ) + + def __init__( + self, + func: Callable[_P, _R], + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, + ) -> None: + if ( + not callable(func) + or iscoroutinefunction(func) + or iscoroutinefunction(getattr(func, "__call__", func)) + ): + raise TypeError("sync_to_async can only be applied to sync functions.") + self.func = func + functools.update_wrapper(self, func) + self._thread_sensitive = thread_sensitive + markcoroutinefunction(self) + if thread_sensitive and executor is not None: + raise TypeError("executor must not be set when thread_sensitive is True") + self._executor = executor + try: + self.__self__ = func.__self__ # type: ignore + except AttributeError: + pass + + async def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R: + __traceback_hide__ = True # noqa: F841 + loop = asyncio.get_running_loop() + + # Work out what thread to run the code in + if self._thread_sensitive: + current_thread_executor = getattr(AsyncToSync.executors, "current", None) + if current_thread_executor: + # If we have a parent sync thread above somewhere, use that + executor = current_thread_executor + elif self.thread_sensitive_context.get(None): + # If we have a way of retrieving the current context, attempt + # to use a per-context thread pool executor + thread_sensitive_context = self.thread_sensitive_context.get() + + if thread_sensitive_context in self.context_to_thread_executor: + # Re-use thread executor in current context + executor = self.context_to_thread_executor[thread_sensitive_context] + else: + # Create new thread executor in current context + executor = ThreadPoolExecutor(max_workers=1) + self.context_to_thread_executor[thread_sensitive_context] = executor + elif loop in AsyncToSync.loop_thread_executors: + # Re-use thread executor for running loop + executor = AsyncToSync.loop_thread_executors[loop] + elif self.deadlock_context.get(False): + raise RuntimeError( + "Single thread executor already being used, would 
deadlock" + ) + else: + # Otherwise, we run it in a fixed single thread + executor = self.single_thread_executor + self.deadlock_context.set(True) + else: + # Use the passed in executor, or the loop's default if it is None + executor = self._executor + + context = contextvars.copy_context() + child = functools.partial(self.func, *args, **kwargs) + func = context.run + task_context: List[asyncio.Task[Any]] = [] + + # Run the code in the right thread + exec_coro = loop.run_in_executor( + executor, + functools.partial( + self.thread_handler, + loop, + sys.exc_info(), + task_context, + func, + child, + ), + ) + ret: _R + try: + ret = await asyncio.shield(exec_coro) + except asyncio.CancelledError: + cancel_parent = True + try: + task = task_context[0] + task.cancel() + try: + await task + cancel_parent = False + except asyncio.CancelledError: + pass + except IndexError: + pass + if exec_coro.done(): + raise + if cancel_parent: + exec_coro.cancel() + ret = await exec_coro + finally: + _restore_context(context) + self.deadlock_context.set(False) + + return ret + + def __get__( + self, parent: Any, objtype: Any + ) -> Callable[_P, Coroutine[Any, Any, _R]]: + """ + Include self for methods + """ + func = functools.partial(self.__call__, parent) + return functools.update_wrapper(func, self.func) + + def thread_handler(self, loop, exc_info, task_context, func, *args, **kwargs): + """ + Wraps the sync application with exception handling. + """ + + __traceback_hide__ = True # noqa: F841 + + # Set the threadlocal for AsyncToSync + self.threadlocal.main_event_loop = loop + self.threadlocal.main_event_loop_pid = os.getpid() + self.threadlocal.task_context = task_context + + # Run the function + # If we have an exception, run the function inside the except block + # after raising it so exc_info is correctly populated. + if exc_info[1]: + try: + raise exc_info[1] + except BaseException: + return func(*args, **kwargs) + else: + return func(*args, **kwargs) + + +@overload +def async_to_sync( + *, + force_new_loop: bool = False, +) -> Callable[ + [Union[Callable[_P, Coroutine[Any, Any, _R]], Callable[_P, Awaitable[_R]]]], + Callable[_P, _R], +]: + ... + + +@overload +def async_to_sync( + awaitable: Union[ + Callable[_P, Coroutine[Any, Any, _R]], + Callable[_P, Awaitable[_R]], + ], + *, + force_new_loop: bool = False, +) -> Callable[_P, _R]: + ... + + +def async_to_sync( + awaitable: Optional[ + Union[ + Callable[_P, Coroutine[Any, Any, _R]], + Callable[_P, Awaitable[_R]], + ] + ] = None, + *, + force_new_loop: bool = False, +) -> Union[ + Callable[ + [Union[Callable[_P, Coroutine[Any, Any, _R]], Callable[_P, Awaitable[_R]]]], + Callable[_P, _R], + ], + Callable[_P, _R], +]: + if awaitable is None: + return lambda f: AsyncToSync( + f, + force_new_loop=force_new_loop, + ) + return AsyncToSync( + awaitable, + force_new_loop=force_new_loop, + ) + + +@overload +def sync_to_async( + *, + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, +) -> Callable[[Callable[_P, _R]], Callable[_P, Coroutine[Any, Any, _R]]]: + ... + + +@overload +def sync_to_async( + func: Callable[_P, _R], + *, + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, +) -> Callable[_P, Coroutine[Any, Any, _R]]: + ... 
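The overloads and factory functions above let both wrappers be used either as direct calls or as decorators. A minimal usage sketch (assuming only the public asgiref.sync API shown in this diff; the read_from_disk helper and the path are hypothetical examples):

    from asgiref.sync import async_to_sync, sync_to_async

    def read_from_disk(path):
        # Blocking I/O; runs on a worker thread once wrapped.
        with open(path) as f:
            return f.read()

    async def handler():
        # thread_sensitive=True (the default) serializes this call onto a
        # single dedicated thread, which matters for non-threadsafe code
        # such as SQLite connections.
        return await sync_to_async(read_from_disk)("/etc/hostname")

    # From synchronous code, async_to_sync runs the coroutine to completion
    # (reusing an outer loop's machinery where one exists) and blocks.
    print(async_to_sync(handler)())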
+ + +def sync_to_async( + func: Optional[Callable[_P, _R]] = None, + *, + thread_sensitive: bool = True, + executor: Optional["ThreadPoolExecutor"] = None, +) -> Union[ + Callable[[Callable[_P, _R]], Callable[_P, Coroutine[Any, Any, _R]]], + Callable[_P, Coroutine[Any, Any, _R]], +]: + if func is None: + return lambda f: SyncToAsync( + f, + thread_sensitive=thread_sensitive, + executor=executor, + ) + return SyncToAsync( + func, + thread_sensitive=thread_sensitive, + executor=executor, + ) diff --git a/venv/lib/python3.12/site-packages/asgiref/testing.py b/venv/lib/python3.12/site-packages/asgiref/testing.py new file mode 100644 index 0000000000..d6ccee5877 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/testing.py @@ -0,0 +1,137 @@ +import asyncio +import contextvars +import time + +from .compatibility import guarantee_single_callable +from .timeout import timeout as async_timeout + + +class ApplicationCommunicator: + """ + Runs an ASGI application in a test mode, allowing sending of + messages to it and retrieval of messages it sends. + """ + + def __init__(self, application, scope): + self._future = None + self.application = guarantee_single_callable(application) + self.scope = scope + self._input_queue = None + self._output_queue = None + + # For Python 3.9 we need to lazily bind the queues, on 3.10+ they bind the + # event loop lazily. + @property + def input_queue(self): + if self._input_queue is None: + self._input_queue = asyncio.Queue() + return self._input_queue + + @property + def output_queue(self): + if self._output_queue is None: + self._output_queue = asyncio.Queue() + return self._output_queue + + @property + def future(self): + if self._future is None: + # Clear context - this ensures that context vars set in the testing scope + # are not "leaked" into the application which would normally begin with + # an empty context. In Python >= 3.11 this could also be written as: + # asyncio.create_task(..., context=contextvars.Context()) + self._future = contextvars.Context().run( + asyncio.create_task, + self.application( + self.scope, self.input_queue.get, self.output_queue.put + ), + ) + return self._future + + async def wait(self, timeout=1): + """ + Waits for the application to stop itself and returns any exceptions. + """ + try: + async with async_timeout(timeout): + try: + await self.future + self.future.result() + except asyncio.CancelledError: + pass + finally: + if not self.future.done(): + self.future.cancel() + try: + await self.future + except asyncio.CancelledError: + pass + + def stop(self, exceptions=True): + future = self._future + if future is None: + return + + if not future.done(): + future.cancel() + elif exceptions: + # Give a chance to raise any exceptions + future.result() + + def __del__(self): + # Clean up on deletion + try: + self.stop(exceptions=False) + except RuntimeError: + # Event loop already stopped + pass + + async def send_input(self, message): + """ + Sends a single message to the application + """ + # Make sure there's not an exception to raise from the task + if self.future.done(): + self.future.result() + + # Give it the message + await self.input_queue.put(message) + + async def receive_output(self, timeout=1): + """ + Receives a single message from the application, with optional timeout. 
+ """ + # Make sure there's not an exception to raise from the task + if self.future.done(): + self.future.result() + # Wait and receive the message + try: + async with async_timeout(timeout): + return await self.output_queue.get() + except asyncio.TimeoutError as e: + # See if we have another error to raise inside + if self.future.done(): + self.future.result() + else: + self.future.cancel() + try: + await self.future + except asyncio.CancelledError: + pass + raise e + + async def receive_nothing(self, timeout=0.1, interval=0.01): + """ + Checks that there is no message to receive in the given time. + """ + # Make sure there's not an exception to raise from the task + if self.future.done(): + self.future.result() + + # `interval` has precedence over `timeout` + start = time.monotonic() + while time.monotonic() - start < timeout: + if not self.output_queue.empty(): + return False + await asyncio.sleep(interval) + return self.output_queue.empty() diff --git a/venv/lib/python3.12/site-packages/asgiref/timeout.py b/venv/lib/python3.12/site-packages/asgiref/timeout.py new file mode 100644 index 0000000000..fd5381d0d9 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/timeout.py @@ -0,0 +1,118 @@ +# This code is originally sourced from the aio-libs project "async_timeout", +# under the Apache 2.0 license. You may see the original project at +# https://github.com/aio-libs/async-timeout + +# It is vendored here to reduce chain-dependencies on this library, and +# modified slightly to remove some features we don't use. + + +import asyncio +import warnings +from types import TracebackType +from typing import Any # noqa +from typing import Optional, Type + + +class timeout: + """timeout context manager. + + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... 
await r.text() + + + timeout - value in seconds or None to disable timeout logic + loop - asyncio compatible event loop + """ + + def __init__( + self, + timeout: Optional[float], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + self._timeout = timeout + if loop is None: + loop = asyncio.get_running_loop() + else: + warnings.warn( + """The loop argument to timeout() is deprecated.""", DeprecationWarning + ) + self._loop = loop + self._task = None # type: Optional[asyncio.Task[Any]] + self._cancelled = False + self._cancel_handler = None # type: Optional[asyncio.Handle] + self._cancel_at = None # type: Optional[float] + + def __enter__(self) -> "timeout": + return self._do_enter() + + def __exit__( + self, + exc_type: Type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + async def __aenter__(self) -> "timeout": + return self._do_enter() + + async def __aexit__( + self, + exc_type: Type[BaseException], + exc_val: BaseException, + exc_tb: TracebackType, + ) -> None: + self._do_exit(exc_type) + + @property + def expired(self) -> bool: + return self._cancelled + + @property + def remaining(self) -> Optional[float]: + if self._cancel_at is not None: + return max(self._cancel_at - self._loop.time(), 0.0) + else: + return None + + def _do_enter(self) -> "timeout": + # Support Tornado 5- without timeout + # Details: https://github.com/python/asyncio/issues/392 + if self._timeout is None: + return self + + self._task = asyncio.current_task(self._loop) + if self._task is None: + raise RuntimeError( + "Timeout context manager should be used " "inside a task" + ) + + if self._timeout <= 0: + self._loop.call_soon(self._cancel_task) + return self + + self._cancel_at = self._loop.time() + self._timeout + self._cancel_handler = self._loop.call_at(self._cancel_at, self._cancel_task) + return self + + def _do_exit(self, exc_type: Type[BaseException]) -> None: + if exc_type is asyncio.CancelledError and self._cancelled: + self._cancel_handler = None + self._task = None + raise asyncio.TimeoutError + if self._timeout is not None and self._cancel_handler is not None: + self._cancel_handler.cancel() + self._cancel_handler = None + self._task = None + return None + + def _cancel_task(self) -> None: + if self._task is not None: + self._task.cancel() + self._cancelled = True diff --git a/venv/lib/python3.12/site-packages/asgiref/typing.py b/venv/lib/python3.12/site-packages/asgiref/typing.py new file mode 100644 index 0000000000..368aed59bf --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/typing.py @@ -0,0 +1,279 @@ +import sys +from typing import ( + Any, + Awaitable, + Callable, + Dict, + Iterable, + Literal, + Optional, + Protocol, + Tuple, + Type, + TypedDict, + Union, +) + +if sys.version_info >= (3, 11): + from typing import NotRequired +else: + from typing_extensions import NotRequired + +__all__ = ( + "ASGIVersions", + "HTTPScope", + "WebSocketScope", + "LifespanScope", + "WWWScope", + "Scope", + "HTTPRequestEvent", + "HTTPResponseStartEvent", + "HTTPResponseBodyEvent", + "HTTPResponseTrailersEvent", + "HTTPResponsePathsendEvent", + "HTTPServerPushEvent", + "HTTPDisconnectEvent", + "WebSocketConnectEvent", + "WebSocketAcceptEvent", + "WebSocketReceiveEvent", + "WebSocketSendEvent", + "WebSocketResponseStartEvent", + "WebSocketResponseBodyEvent", + "WebSocketDisconnectEvent", + "WebSocketCloseEvent", + "LifespanStartupEvent", + "LifespanShutdownEvent", + "LifespanStartupCompleteEvent", + 
"LifespanStartupFailedEvent", + "LifespanShutdownCompleteEvent", + "LifespanShutdownFailedEvent", + "ASGIReceiveEvent", + "ASGISendEvent", + "ASGIReceiveCallable", + "ASGISendCallable", + "ASGI2Protocol", + "ASGI2Application", + "ASGI3Application", + "ASGIApplication", +) + + +class ASGIVersions(TypedDict): + spec_version: str + version: Union[Literal["2.0"], Literal["3.0"]] + + +class HTTPScope(TypedDict): + type: Literal["http"] + asgi: ASGIVersions + http_version: str + method: str + scheme: str + path: str + raw_path: bytes + query_string: bytes + root_path: str + headers: Iterable[Tuple[bytes, bytes]] + client: Optional[Tuple[str, int]] + server: Optional[Tuple[str, Optional[int]]] + state: NotRequired[Dict[str, Any]] + extensions: Optional[Dict[str, Dict[object, object]]] + + +class WebSocketScope(TypedDict): + type: Literal["websocket"] + asgi: ASGIVersions + http_version: str + scheme: str + path: str + raw_path: bytes + query_string: bytes + root_path: str + headers: Iterable[Tuple[bytes, bytes]] + client: Optional[Tuple[str, int]] + server: Optional[Tuple[str, Optional[int]]] + subprotocols: Iterable[str] + state: NotRequired[Dict[str, Any]] + extensions: Optional[Dict[str, Dict[object, object]]] + + +class LifespanScope(TypedDict): + type: Literal["lifespan"] + asgi: ASGIVersions + state: NotRequired[Dict[str, Any]] + + +WWWScope = Union[HTTPScope, WebSocketScope] +Scope = Union[HTTPScope, WebSocketScope, LifespanScope] + + +class HTTPRequestEvent(TypedDict): + type: Literal["http.request"] + body: bytes + more_body: bool + + +class HTTPResponseDebugEvent(TypedDict): + type: Literal["http.response.debug"] + info: Dict[str, object] + + +class HTTPResponseStartEvent(TypedDict): + type: Literal["http.response.start"] + status: int + headers: Iterable[Tuple[bytes, bytes]] + trailers: bool + + +class HTTPResponseBodyEvent(TypedDict): + type: Literal["http.response.body"] + body: bytes + more_body: bool + + +class HTTPResponseTrailersEvent(TypedDict): + type: Literal["http.response.trailers"] + headers: Iterable[Tuple[bytes, bytes]] + more_trailers: bool + + +class HTTPResponsePathsendEvent(TypedDict): + type: Literal["http.response.pathsend"] + path: str + + +class HTTPServerPushEvent(TypedDict): + type: Literal["http.response.push"] + path: str + headers: Iterable[Tuple[bytes, bytes]] + + +class HTTPDisconnectEvent(TypedDict): + type: Literal["http.disconnect"] + + +class WebSocketConnectEvent(TypedDict): + type: Literal["websocket.connect"] + + +class WebSocketAcceptEvent(TypedDict): + type: Literal["websocket.accept"] + subprotocol: Optional[str] + headers: Iterable[Tuple[bytes, bytes]] + + +class WebSocketReceiveEvent(TypedDict): + type: Literal["websocket.receive"] + bytes: Optional[bytes] + text: Optional[str] + + +class WebSocketSendEvent(TypedDict): + type: Literal["websocket.send"] + bytes: Optional[bytes] + text: Optional[str] + + +class WebSocketResponseStartEvent(TypedDict): + type: Literal["websocket.http.response.start"] + status: int + headers: Iterable[Tuple[bytes, bytes]] + + +class WebSocketResponseBodyEvent(TypedDict): + type: Literal["websocket.http.response.body"] + body: bytes + more_body: bool + + +class WebSocketDisconnectEvent(TypedDict): + type: Literal["websocket.disconnect"] + code: int + reason: Optional[str] + + +class WebSocketCloseEvent(TypedDict): + type: Literal["websocket.close"] + code: int + reason: Optional[str] + + +class LifespanStartupEvent(TypedDict): + type: Literal["lifespan.startup"] + + +class LifespanShutdownEvent(TypedDict): + type: 
Literal["lifespan.shutdown"] + + +class LifespanStartupCompleteEvent(TypedDict): + type: Literal["lifespan.startup.complete"] + + +class LifespanStartupFailedEvent(TypedDict): + type: Literal["lifespan.startup.failed"] + message: str + + +class LifespanShutdownCompleteEvent(TypedDict): + type: Literal["lifespan.shutdown.complete"] + + +class LifespanShutdownFailedEvent(TypedDict): + type: Literal["lifespan.shutdown.failed"] + message: str + + +ASGIReceiveEvent = Union[ + HTTPRequestEvent, + HTTPDisconnectEvent, + WebSocketConnectEvent, + WebSocketReceiveEvent, + WebSocketDisconnectEvent, + LifespanStartupEvent, + LifespanShutdownEvent, +] + + +ASGISendEvent = Union[ + HTTPResponseStartEvent, + HTTPResponseBodyEvent, + HTTPResponseTrailersEvent, + HTTPServerPushEvent, + HTTPDisconnectEvent, + WebSocketAcceptEvent, + WebSocketSendEvent, + WebSocketResponseStartEvent, + WebSocketResponseBodyEvent, + WebSocketCloseEvent, + LifespanStartupCompleteEvent, + LifespanStartupFailedEvent, + LifespanShutdownCompleteEvent, + LifespanShutdownFailedEvent, +] + + +ASGIReceiveCallable = Callable[[], Awaitable[ASGIReceiveEvent]] +ASGISendCallable = Callable[[ASGISendEvent], Awaitable[None]] + + +class ASGI2Protocol(Protocol): + def __init__(self, scope: Scope) -> None: + ... + + async def __call__( + self, receive: ASGIReceiveCallable, send: ASGISendCallable + ) -> None: + ... + + +ASGI2Application = Type[ASGI2Protocol] +ASGI3Application = Callable[ + [ + Scope, + ASGIReceiveCallable, + ASGISendCallable, + ], + Awaitable[None], +] +ASGIApplication = Union[ASGI2Application, ASGI3Application] diff --git a/venv/lib/python3.12/site-packages/asgiref/wsgi.py b/venv/lib/python3.12/site-packages/asgiref/wsgi.py new file mode 100644 index 0000000000..65af427959 --- /dev/null +++ b/venv/lib/python3.12/site-packages/asgiref/wsgi.py @@ -0,0 +1,166 @@ +from io import BytesIO +from tempfile import SpooledTemporaryFile + +from asgiref.sync import AsyncToSync, sync_to_async + + +class WsgiToAsgi: + """ + Wraps a WSGI application to make it into an ASGI application. + """ + + def __init__(self, wsgi_application): + self.wsgi_application = wsgi_application + + async def __call__(self, scope, receive, send): + """ + ASGI application instantiation point. + We return a new WsgiToAsgiInstance here with the WSGI app + and the scope, ready to respond when it is __call__ed. + """ + await WsgiToAsgiInstance(self.wsgi_application)(scope, receive, send) + + +class WsgiToAsgiInstance: + """ + Per-socket instance of a wrapped WSGI application + """ + + def __init__(self, wsgi_application): + self.wsgi_application = wsgi_application + self.response_started = False + self.response_content_length = None + + async def __call__(self, scope, receive, send): + if scope["type"] != "http": + raise ValueError("WSGI wrapper received a non-HTTP scope") + self.scope = scope + with SpooledTemporaryFile(max_size=65536) as body: + # Alright, wait for the http.request messages + while True: + message = await receive() + if message["type"] != "http.request": + raise ValueError("WSGI wrapper received a non-HTTP-request message") + body.write(message.get("body", b"")) + if not message.get("more_body"): + break + body.seek(0) + # Wrap send so it can be called from the subthread + self.sync_send = AsyncToSync(send) + # Call the WSGI app + await self.run_wsgi_app(body) + + def build_environ(self, scope, body): + """ + Builds a scope and request body into a WSGI environ object. 
+ """ + script_name = scope.get("root_path", "").encode("utf8").decode("latin1") + path_info = scope["path"].encode("utf8").decode("latin1") + if path_info.startswith(script_name): + path_info = path_info[len(script_name) :] + environ = { + "REQUEST_METHOD": scope["method"], + "SCRIPT_NAME": script_name, + "PATH_INFO": path_info, + "QUERY_STRING": scope["query_string"].decode("ascii"), + "SERVER_PROTOCOL": "HTTP/%s" % scope["http_version"], + "wsgi.version": (1, 0), + "wsgi.url_scheme": scope.get("scheme", "http"), + "wsgi.input": body, + "wsgi.errors": BytesIO(), + "wsgi.multithread": True, + "wsgi.multiprocess": True, + "wsgi.run_once": False, + } + # Get server name and port - required in WSGI, not in ASGI + if "server" in scope: + environ["SERVER_NAME"] = scope["server"][0] + environ["SERVER_PORT"] = str(scope["server"][1]) + else: + environ["SERVER_NAME"] = "localhost" + environ["SERVER_PORT"] = "80" + + if scope.get("client") is not None: + environ["REMOTE_ADDR"] = scope["client"][0] + + # Go through headers and make them into environ entries + for name, value in self.scope.get("headers", []): + name = name.decode("latin1") + if name == "content-length": + corrected_name = "CONTENT_LENGTH" + elif name == "content-type": + corrected_name = "CONTENT_TYPE" + else: + corrected_name = "HTTP_%s" % name.upper().replace("-", "_") + # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case + value = value.decode("latin1") + if corrected_name in environ: + value = environ[corrected_name] + "," + value + environ[corrected_name] = value + return environ + + def start_response(self, status, response_headers, exc_info=None): + """ + WSGI start_response callable. + """ + # Don't allow re-calling once response has begun + if self.response_started: + raise exc_info[1].with_traceback(exc_info[2]) + # Don't allow re-calling without exc_info + if hasattr(self, "response_start") and exc_info is None: + raise ValueError( + "You cannot call start_response a second time without exc_info" + ) + # Extract status code + status_code, _ = status.split(" ", 1) + status_code = int(status_code) + # Extract headers + headers = [ + (name.lower().encode("ascii"), value.encode("ascii")) + for name, value in response_headers + ] + # Extract content-length + self.response_content_length = None + for name, value in response_headers: + if name.lower() == "content-length": + self.response_content_length = int(value) + # Build and send response start message. + self.response_start = { + "type": "http.response.start", + "status": status_code, + "headers": headers, + } + + @sync_to_async + def run_wsgi_app(self, body): + """ + Called in a subthread to run the WSGI app. We encapsulate like + this so that the start_response callable is called in the same thread. 
+ """ + # Translate the scope and incoming request body into a WSGI environ + environ = self.build_environ(self.scope, body) + # Run the WSGI app + bytes_sent = 0 + for output in self.wsgi_application(environ, self.start_response): + # If this is the first response, include the response headers + if not self.response_started: + self.response_started = True + self.sync_send(self.response_start) + # If the application supplies a Content-Length header + if self.response_content_length is not None: + # The server should not transmit more bytes to the client than the header allows + bytes_allowed = self.response_content_length - bytes_sent + if len(output) > bytes_allowed: + output = output[:bytes_allowed] + self.sync_send( + {"type": "http.response.body", "body": output, "more_body": True} + ) + bytes_sent += len(output) + # The server should stop iterating over the response when enough data has been sent + if bytes_sent == self.response_content_length: + break + # Close connection + if not self.response_started: + self.response_started = True + self.sync_send(self.response_start) + self.sync_send({"type": "http.response.body"}) diff --git a/venv/lib/python3.12/site-packages/attr/__init__.py b/venv/lib/python3.12/site-packages/attr/__init__.py new file mode 100644 index 0000000000..386305d628 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/__init__.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + + +import sys + +from functools import partial + +from . import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._version_info import VersionInfo + + +__version__ = "22.1.0" +__version_info__ = VersionInfo._from_version_string(__version__) + +__title__ = "attrs" +__description__ = "Classes Without Boilerplate" +__url__ = "https://www.attrs.org/" +__uri__ = __url__ +__doc__ = __description__ + " <" + __uri__ + ">" + +__author__ = "Hynek Schlawack" +__email__ = "hs@ox.cx" + +__license__ = "MIT" +__copyright__ = "Copyright (c) 2015 Hynek Schlawack" + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + +__all__ = [ + "Attribute", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "evolve", + "exceptions", + "fields", + "fields_dict", + "filters", + "get_run_validators", + "has", + "ib", + "make_class", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + +if sys.version_info[:2] >= (3, 6): + from ._next_gen import define, field, frozen, mutable # noqa: F401 + + __all__.extend(("define", "field", "frozen", "mutable")) diff --git a/venv/lib/python3.12/site-packages/attr/__init__.pyi b/venv/lib/python3.12/site-packages/attr/__init__.pyi new file mode 100644 index 0000000000..03cc4c82d2 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/__init__.pyi @@ -0,0 +1,486 @@ +import sys + +from typing import ( + Any, + Callable, + ClassVar, + Dict, + Generic, + List, + Mapping, + Optional, + Protocol, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . 
import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._version_info import VersionInfo + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, Attribute[_T], _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[[Attribute[_T], _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List[Attribute[Any]]], List[Attribute[Any]] +] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# A protocol to be able to statically accept an attrs class. +class AttrsInstance(Protocol): + __attrs_attrs__: ClassVar[Any] + +# _make -- + +NOTHING: object + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... + +# Static type inference support via __dataclass_transform__ implemented as per: +# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md +# This annotation must be applied to all overloads of "define" and "attrs" +# +# NOTE: This is a typing construct and does not exist at runtime. Extensions +# wrapping attrs decorators should declare a separate __dataclass_transform__ +# signature in the extension module using the specification linked above to +# provide pyright support. +def __dataclass_transform__( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()), +) -> Callable[[_T], _T]: ... + +class Attribute(Generic[_T]): + name: str + default: Optional[_T] + validator: Optional[_ValidatorType[_T]] + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: Optional[bool] + init: bool + converter: Optional[_ConverterType] + metadata: Dict[Any, Any] + type: Optional[Type[_T]] + kw_only: bool + on_setattr: _OnSetAttrType + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. 
`attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... 
+ +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... 
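The overload ladders for attrib, field, and attrs above exist so static checkers can infer a field's type from whichever of default, factory, or validator is supplied. A brief runtime sketch of the call forms they distinguish (the Point class is a hypothetical example, not part of these stubs):

    import attr

    @attr.s
    class Point:
        x = attr.ib(default=0)          # explicit-default overload
        tags = attr.ib(factory=list)    # sugar for default=attr.Factory(list)
        label = attr.ib(
            default=None,               # None-default overload
            validator=attr.validators.optional(
                attr.validators.instance_of(str)
            ),
        )

    p = Point(x=1, label="origin")
    assert p.tags == []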
+@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define +frozen = define # they differ only in their defaults + +def fields(cls: Type[AttrsInstance]) -> Any: ... +def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _C, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., +) -> _C: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... 
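These stubs cover the classic attr.s API, the newer define/frozen aliases, and the introspection helpers. A short sketch of how they line up at runtime (hypothetical User class and field names):

    import attr

    @attr.define            # slotted, auto-detected annotated fields
    class User:
        name: str = "anon"
        password: str = ""

    u = User(name="alice", password="secret")
    # fields() exposes the generated Attribute objects...
    assert [f.name for f in attr.fields(User)] == ["name", "password"]
    # ...and asdict() can drop sensitive values via its (Attribute, value) filter.
    assert attr.asdict(u, filter=lambda a, v: a.name != "password") == {
        "name": "alice"
    }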
+ +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> bool: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... + +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/venv/lib/python3.12/site-packages/attr/_cmp.py b/venv/lib/python3.12/site-packages/attr/_cmp.py new file mode 100644 index 0000000000..81b99e4c33 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_cmp.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and + ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if + at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality + of two objects. + :param Optional[callable] lt: `callable` used to evaluate whether + one object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether + one object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether + one object is greater than another object. + :param Optional[callable] ge: `callable` used to evaluate whether + one object is greater than or equal to another object. + + :param bool require_same_type: When `True`, equality and ordering methods + will return `NotImplemented` if objects are not of the same type. + + :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. + num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = _make_ne() + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = types.new_class( + class_name, (object,), {}, lambda ns: ns.update(body) + ) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. + if 0 < num_order_functions < 4: + if not has_eq_function: + # functools.total_ordering requires __eq__ to be defined, + # so raise early error here to keep a nice stack. 
+            raise ValueError(
+                "eq must be defined in order to complete ordering from "
+                "lt, le, gt, ge."
+            )
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+        """
+        self.value = value
+
+    return __init__
+
+
+def _make_operator(name, func):
+    """
+    Create operator method.
+    """
+
+    def method(self, other):
+        if not self._is_comparable_to(other):
+            return NotImplemented
+
+        result = func(self.value, other.value)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return result
+
+    method.__name__ = "__%s__" % (name,)
+    method.__doc__ = "Return a %s b. Computed by attrs." % (
+        _operation_names[name],
+    )
+
+    return method
+
+
+def _is_comparable_to(self, other):
+    """
+    Check whether `other` is comparable to `self`.
+    """
+    for func in self._requirements:
+        if not func(self, other):
+            return False
+    return True
+
+
+def _check_same_type(self, other):
+    """
+    Return True if *self* and *other* are of the same type, False otherwise.
+    """
+    return other.value.__class__ is self.value.__class__
diff --git a/venv/lib/python3.12/site-packages/attr/_cmp.pyi b/venv/lib/python3.12/site-packages/attr/_cmp.pyi
new file mode 100644
index 0000000000..35437eff62
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/attr/_cmp.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+    eq: Optional[_CompareWithType],
+    lt: Optional[_CompareWithType],
+    le: Optional[_CompareWithType],
+    gt: Optional[_CompareWithType],
+    ge: Optional[_CompareWithType],
+    require_same_type: bool,
+    class_name: str,
+) -> Type: ...
diff --git a/venv/lib/python3.12/site-packages/attr/_compat.py b/venv/lib/python3.12/site-packages/attr/_compat.py
new file mode 100644
index 0000000000..5826493257
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/attr/_compat.py
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: MIT
+
+
+import inspect
+import platform
+import sys
+import threading
+import types
+import warnings
+
+from collections.abc import Mapping, Sequence  # noqa
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY36 = sys.version_info[:2] >= (3, 6)
+HAS_F_STRINGS = PY36
+PY310 = sys.version_info[:2] >= (3, 10)
+
+
+if PYPY or PY36:
+    ordered_dict = dict
+else:
+    from collections import OrderedDict
+
+    ordered_dict = OrderedDict
+
+
+def just_warn(*args, **kw):
+    warnings.warn(
+        "Running interpreter doesn't sufficiently support code object "
+        "introspection. Some features like bare super() or accessing "
+        "__class__ will not work with slotted classes.",
+        RuntimeWarning,
+        stacklevel=2,
+    )
+
+
+class _AnnotationExtractor:
+    """
+    Extract type annotations from a callable, returning None whenever there
+    is none.
+    """
+
+    __slots__ = ["sig"]
+
+    def __init__(self, callable):
+        try:
+            self.sig = inspect.signature(callable)
+        except (ValueError, TypeError):  # inspect failed
+            self.sig = None
+
+    def get_first_param_type(self):
+        """
+        Return the type annotation of the first argument if it's not empty.
+        """
+        if not self.sig:
+            return None
+
+        params = list(self.sig.parameters.values())
+        if params and params[0].annotation is not inspect.Parameter.empty:
+            return params[0].annotation
+
+        return None
+
+    def get_return_type(self):
+        """
+        Return the return type if it's not empty.
+ """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +def make_set_closure_cell(): + """Return a function of two arguments (cell, value) which sets + the value stored in the closure cell `cell` to `value`. + """ + # pypy makes this easy. (It also supports the logic below, but + # why not do the easy/fast thing?) + if PYPY: + + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + + return set_closure_cell + + # Otherwise gotta do it the hard way. + + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + try: + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. + co = set_first_cellvar_to.__code__ + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover + + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. + if sys.version_info >= (3, 8): + + def set_closure_cell(cell, value): + cell.cell_contents = value + + else: + args = [co.co_argcount] + args.append(co.co_kwonlyargcount) + args.extend( + [ + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + # These two arguments are reversed: + co.co_cellvars, + co.co_freevars, + ] + ) + set_first_freevar_code = types.CodeType(*args) + + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) + + # Make sure it works on this interpreter: + def make_func_with_cell(): + x = None + + def func(): + return x # pragma: no cover + + return func + + cell = make_func_with_cell().__closure__[0] + set_closure_cell(cell, 100) + if cell.cell_contents != 100: + raise AssertionError # pragma: no cover + + except Exception: + return just_warn + else: + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() + +# Thread-local global to track attrs instances which are already being repr'd. +# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. 
+repr_context = threading.local() diff --git a/venv/lib/python3.12/site-packages/attr/_config.py b/venv/lib/python3.12/site-packages/attr/_config.py new file mode 100644 index 0000000000..96d4200773 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/venv/lib/python3.12/site-packages/attr/_funcs.py b/venv/lib/python3.12/site-packages/attr/_funcs.py new file mode 100644 index 0000000000..a982d7cb56 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_funcs.py @@ -0,0 +1,420 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the ``attrs`` attribute values of *inst* as a dict. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + :param Optional[callable] value_serializer: A hook that is called for every + attribute or dict key/value. It receives the current instance, field + and value and must return the (updated) value. The hook is run *after* + the optional *filter* has been applied. + + :rtype: return type of *dict_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 If a dict has a collection for a key, it is + serialized as a tuple. 
+ """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + ) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the ``attrs`` attribute values of *inst* as a tuple. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. 
+    :param bool retain_collection_types: Do not convert to ``list``
+        or ``dict`` when encountering an attribute whose type is
+        ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
+        ``True``.
+
+    :rtype: return type of *tuple_factory*
+
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class.
+
+    .. versionadded:: 16.2.0
+    """
+    attrs = fields(inst.__class__)
+    rv = []
+    retain = retain_collection_types  # Very long. :/
+    for a in attrs:
+        v = getattr(inst, a.name)
+        if filter is not None and not filter(a, v):
+            continue
+        if recurse is True:
+            if has(v.__class__):
+                rv.append(
+                    astuple(
+                        v,
+                        recurse=True,
+                        filter=filter,
+                        tuple_factory=tuple_factory,
+                        retain_collection_types=retain,
+                    )
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
+                cf = v.__class__ if retain is True else list
+                rv.append(
+                    cf(
+                        [
+                            astuple(
+                                j,
+                                recurse=True,
+                                filter=filter,
+                                tuple_factory=tuple_factory,
+                                retain_collection_types=retain,
+                            )
+                            if has(j.__class__)
+                            else j
+                            for j in v
+                        ]
+                    )
+                )
+            elif isinstance(v, dict):
+                df = v.__class__ if retain is True else dict
+                rv.append(
+                    df(
+                        (
+                            astuple(
+                                kk,
+                                tuple_factory=tuple_factory,
+                                retain_collection_types=retain,
+                            )
+                            if has(kk.__class__)
+                            else kk,
+                            astuple(
+                                vv,
+                                tuple_factory=tuple_factory,
+                                retain_collection_types=retain,
+                            )
+                            if has(vv.__class__)
+                            else vv,
+                        )
+                        for kk, vv in v.items()
+                    )
+                )
+            else:
+                rv.append(v)
+        else:
+            rv.append(v)
+
+    return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+    """
+    Check whether *cls* is a class with ``attrs`` attributes.
+
+    :param type cls: Class to introspect.
+    :raise TypeError: If *cls* is not a class.
+
+    :rtype: bool
+    """
+    return getattr(cls, "__attrs_attrs__", None) is not None
+
+
+def assoc(inst, **changes):
+    """
+    Copy *inst* and apply *changes*.
+
+    :param inst: Instance of a class with ``attrs`` attributes.
+    :param changes: Keyword changes in the new copy.
+
+    :return: A copy of inst with *changes* incorporated.
+
+    :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
+        be found on *cls*.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class.
+
+    .. deprecated:: 17.1.0
+        Use `attrs.evolve` instead if you can.
+        This function will not be removed due to the slightly different
+        approach compared to `attrs.evolve`.
+    """
+    import warnings
+
+    warnings.warn(
+        "assoc is deprecated and will be removed after 2018/01.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    new = copy.copy(inst)
+    attrs = fields(inst.__class__)
+    for k, v in changes.items():
+        a = getattr(attrs, k, NOTHING)
+        if a is NOTHING:
+            raise AttrsAttributeNotFoundError(
+                "{k} is not an attrs attribute on {cl}.".format(
+                    k=k, cl=new.__class__
+                )
+            )
+        _obj_setattr(new, k, v)
+    return new
+
+
+def evolve(inst, **changes):
+    """
+    Create a new instance, based on *inst* with *changes* applied.
+
+    :param inst: Instance of a class with ``attrs`` attributes.
+    :param changes: Keyword changes in the new copy.
+
+    :return: A copy of inst with *changes* incorporated.
+
+    :raise TypeError: If *attr_name* couldn't be found in the class
+        ``__init__``.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class.
+
+    .. versionadded:: 17.1.0
+    """
+    cls = inst.__class__
+    attrs = fields(cls)
+    for a in attrs:
+        if not a.init:
+            continue
+        attr_name = a.name  # To deal with private attributes.
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +def resolve_types(cls, globalns=None, localns=None, attribs=None): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in `Attribute`'s *type* + field. In other words, you don't need to resolve your types if you only + use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, e.g. if the name only exists + inside a method, you may pass *globalns* or *localns* to specify other + dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + :param type cls: Class to resolve. + :param Optional[dict] globalns: Dictionary containing global variables. + :param Optional[dict] localns: Dictionary containing local variables. + :param Optional[list] attribs: List of attribs for the given class. + This is necessary when calling from inside a ``field_transformer`` + since *cls* is not an ``attrs`` class yet. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class and you didn't pass any attribs. + :raise NameError: If types cannot be resolved because of missing variables. + + :returns: *cls* so you can use this function also as a class decorator. + Please note that you have to apply it **after** `attrs.define`. That + means the decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + + """ + # Since calling get_type_hints is expensive we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + hints = typing.get_type_hints(cls, globalns=globalns, localns=localns) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _obj_setattr(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/venv/lib/python3.12/site-packages/attr/_make.py b/venv/lib/python3.12/site-packages/attr/_make.py new file mode 100644 index 0000000000..4d1afe3fc8 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_make.py @@ -0,0 +1,3006 @@ +# SPDX-License-Identifier: MIT + +import copy +import linecache +import sys +import types +import typing + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + HAS_F_STRINGS, + PY310, + PYPY, + _AnnotationExtractor, + ordered_dict, + set_closure_cell, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. 
+_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = ( + " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" +) +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing: + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + ``_Nothing`` is a singleton. There is only ever one of it. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + """ + + _singleton = None + + def __new__(cls): + if _Nothing._singleton is None: + _Nothing._singleton = super().__new__(cls) + return _Nothing._singleton + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + `attr.s`! + + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: `callable` or a `list` of `callable`\\ s. 
+
+    :param repr: Include this attribute in the generated ``__repr__``
+        method. If ``True``, include the attribute; if ``False``, omit it. By
+        default, the built-in ``repr()`` function is used. To override how the
+        attribute value is formatted, pass a ``callable`` that takes a single
+        value and returns a string. Note that the resulting string is used
+        as-is, i.e. it will be used directly *instead* of calling ``repr()``
+        (the default).
+    :type repr: a `bool` or a `callable` to use a custom function.
+
+    :param eq: If ``True`` (default), include this attribute in the
+        generated ``__eq__`` and ``__ne__`` methods that check two instances
+        for equality. To override how the attribute value is compared,
+        pass a ``callable`` that takes a single value and returns the value
+        to be compared.
+    :type eq: a `bool` or a `callable`.
+
+    :param order: If ``True`` (default), include this attribute in the
+        generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+        To override how the attribute value is ordered,
+        pass a ``callable`` that takes a single value and returns the value
+        to be ordered.
+    :type order: a `bool` or a `callable`.
+
+    :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+        same value. Must not be mixed with *eq* or *order*.
+    :type cmp: a `bool` or a `callable`.
+
+    :param Optional[bool] hash: Include this attribute in the generated
+        ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
+        is the correct behavior according to the Python spec. Setting this
+        value to anything other than ``None`` is *discouraged*.
+    :param bool init: Include this attribute in the generated ``__init__``
+        method. It is possible to set this to ``False`` and set a default
+        value. In that case this attribute is unconditionally initialized
+        with the specified default value or factory.
+    :param callable converter: `callable` that is called by
+        ``attrs``-generated ``__init__`` methods to convert the attribute's
+        value to the desired format. It is given the passed-in value, and the
+        returned value will be used as the new value of the attribute. The
+        value is converted before being passed to the validator, if any.
+    :param metadata: An arbitrary mapping, to be used by third-party
+        components. See `extending_metadata`.
+    :param type: The type of the attribute. In Python 3.6 or greater, the
+        preferred method to specify the type is using a variable annotation
+        (see :pep:`526`).
+        This argument is provided for backward compatibility.
+        Regardless of the approach used, the type will be stored on
+        ``Attribute.type``.
+
+        Please note that ``attrs`` doesn't do anything with this metadata by
+        itself. You can use it as part of your own code or for
+        static type checking.
+    :param kw_only: Make this attribute keyword-only (Python 3+)
+        in the generated ``__init__`` (if ``init`` is ``False``, this
+        parameter is ignored).
+    :param on_setattr: Allows overwriting the *on_setattr* setting from
+        `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is
+        used. Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for
+        this attribute -- regardless of the setting in `attr.s`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    .. versionadded:: 15.2.0 *convert*
+    .. versionadded:: 16.3.0 *metadata*
+    .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+    .. versionchanged:: 17.1.0
+        *hash* is ``None`` and therefore mirrors *eq* by default.
+    .. versionadded:: 17.3.0 *type*
+    ..
deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if factory is not None: + if default is not NOTHING: + raise ValueError( + "The `default` and `factory` arguments are mutually " + "exclusive." + ) + if not callable(factory): + raise ValueError("The `factory` argument must be a callable.") + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + "Exec" the script with the given global (globs) and local (locs) variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs): + """ + Create the method with the script given and return the method object. + """ + locs = {} + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + else: + filename = "{}-{}>".format(base_filename[:-1], count) + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. 
+ + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + _tuple_property_pat.format(index=i, attr_name=attr_name) + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + + Requires Python 3. + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. 
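As a quick illustration of what these collectors guarantee, here is a minimal sketch using only the public `attr` API (assuming this vendored copy behaves like upstream attrs): an attribute redefined in a subclass is collected exactly once, with the subclass definition winning.

import attr

@attr.s
class Base:
    x = attr.ib(default=1)
    y = attr.ib(default=2)

@attr.s
class Child(Base):
    x = attr.ib(default=10)  # redefines Base.x

# The duplicate is filtered out and the freshest definition survives.
names = [a.name for a in attr.fields(Child)]
assert names.count("x") == 1
assert attr.fields(Child).x.default == 10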
+ for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + *collect_by_mro* is True, collect them in the correct MRO order, otherwise + use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = [(name, ca) for name, ca in these.items()] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. Attribute in question: %r" % (a,) + ) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +if PYPY: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. 
+ """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + +else: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). 
+ if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _obj_setattr + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _obj_setattr + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = dict() + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. 
Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # ValueError: Cell is empty + pass + else: + if match: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return tuple(getattr(self, name) for name in state_attr_names) + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + raise ValueError( + "Can't combine custom __setattr__ with on_setattr hooks." + ) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _obj_setattr(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + try: + method.__module__ = self._cls.__module__ + except AttributeError: + pass + + try: + method.__qualname__ = ".".join( + (self._cls.__qualname__, method.__name__) + ) + except AttributeError: + pass + + try: + method.__doc__ = "Method generated by attrs for class %s." % ( + self._cls.__qualname__, + ) + except AttributeError: + pass + + return method + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. 
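The observable effect of these rules, sketched with the public API (assuming upstream semantics): *order* follows *eq* when left unset, and enabling *order* without *eq* is rejected.

import attr

@attr.s  # eq defaults to True, so order mirrors it and instances sort
class V:
    n = attr.ib()

assert V(1) < V(2)

try:
    @attr.s(eq=False, order=True)  # the one forbidden combination
    class W:
        n = attr.ib()
except ValueError:
    pass  # "`order` can only be True if `eq` is True too."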
+ if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signal that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. + for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds `dunder + `_\ -methods according to the + specified attributes using `attr.ib` or the *these* argument. + + :param these: A dictionary of name to `attr.ib` mappings. This is + useful to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. + + If *these* is not ``None``, ``attrs`` will *not* search the class body + for attributes and will *not* remove any attributes from it. + + If *these* is an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used. + + :type these: `dict` of `str` to `attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. 
+    :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+        *order*, and *hash* arguments explicitly, assume they are set to
+        ``True`` **unless any** of the involved methods for one of the
+        arguments is implemented in the *current* class (i.e. it is *not*
+        inherited from some base class).
+
+        So for example by implementing ``__eq__`` on a class yourself,
+        ``attrs`` will deduce ``eq=False`` and will create *neither*
+        ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+        ``__ne__`` by default, so it *should* be enough to only implement
+        ``__eq__`` in most cases).
+
+        .. warning::
+
+           If you prevent ``attrs`` from creating the ordering methods for you
+           (``order=False``, e.g. by implementing ``__le__``), it becomes
+           *your* responsibility to make sure its ordering is sound. The best
+           way is to use the `functools.total_ordering` decorator.
+
+
+        Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+        *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+        *auto_detect* requires Python 3. Setting it to ``True`` on Python 2
+        raises an `attrs.exceptions.PythonTooOldError`.
+
+    :param bool repr: Create a ``__repr__`` method with a human-readable
+        representation of ``attrs`` attributes.
+    :param bool str: Create a ``__str__`` method that is identical to
+        ``__repr__``. This is usually not necessary except for
+        `Exception`\ s.
+    :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+        and ``__ne__`` methods that check two instances for equality.
+
+        They compare the instances as if they were tuples of their ``attrs``
+        attributes if and only if the types of both classes are *identical*!
+    :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+        ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+        allow instances to be ordered. If ``None`` (default), mirror the value
+        of *eq*.
+    :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+        and *order* to the same value. Must not be mixed with *eq* or *order*.
+    :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
+        is generated according to how *eq* and *frozen* are set.
+
+        1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
+        2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+           None, marking it unhashable (which it is).
+        3. If *eq* is False, ``__hash__`` will be left untouched meaning the
+           ``__hash__`` method of the base class will be used (if base class is
+           ``object``, this means it will fall back to id-based hashing).
+
+        Although not recommended, you can decide for yourself and force
+        ``attrs`` to create one (e.g. if the class is immutable even though you
+        didn't freeze it programmatically) by passing ``True`` or ``False``.
+        Both of these cases are rather special and should be used carefully.
+
+        See our documentation on `hashing`, Python's documentation on
+        `object.__hash__`, and the GitHub issue that led to the default
+        behavior for more details.
+    :param bool init: Create an ``__init__`` method that initializes the
+        ``attrs`` attributes. Leading underscores are stripped for the argument
+        name. If a ``__attrs_pre_init__`` method exists on the class, it will
+        be called before the class is initialized. If a ``__attrs_post_init__``
+        method exists on the class, it will be called after the class is fully
+        initialized.
+
+        If ``init`` is ``False``, an ``__attrs_init__`` method will be
+        injected instead. This allows you to define a custom ``__init__``
+        method that can do pre-init work such as ``super().__init__()``,
+        and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+    :param bool slots: Create a slotted class that's more memory-efficient.
+        Slotted classes are generally superior to the default dict classes,
+        but have some gotchas you should know about, so we encourage you to
+        read the glossary entry.
+    :param bool frozen: Make instances immutable after initialization. If
+        someone attempts to modify a frozen instance,
+        `attr.exceptions.FrozenInstanceError` is raised.
+
+        .. note::
+
+            1. This is achieved by installing a custom ``__setattr__`` method
+               on your class, so you can't implement your own.
+
+            2. True immutability is impossible in Python.
+
+            3. This *does* have a minor runtime performance impact when
+               initializing new instances. In other words: ``__init__`` is
+               slightly slower with ``frozen=True``.
+
+            4. If a class is frozen, you cannot modify ``self`` in
+               ``__attrs_post_init__`` or a self-written ``__init__``. You can
+               circumvent that limitation by using
+               ``object.__setattr__(self, "attribute_name", value)``.
+
+            5. Subclasses of a frozen class are frozen too.
+
+    :param bool weakref_slot: Make instances weak-referenceable. This has no
+        effect unless ``slots`` is also enabled.
+    :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+        attributes (Python 3.6 and later only) from the class body.
+
+        In this case, you **must** annotate every field. If ``attrs``
+        encounters a field that is set to an `attr.ib` but lacks a type
+        annotation, an `attr.exceptions.UnannotatedAttributeError` is
+        raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+        want to set a type.
+
+        If you assign a value to those attributes (e.g. ``x: int = 42``), that
+        value becomes the default value as if it were passed using
+        ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
+        works as expected in most cases (see warning below).
+
+        Attributes annotated as `typing.ClassVar`, and attributes that are
+        neither annotated nor set to an `attr.ib` are **ignored**.
+
+        .. warning::
+           For features that use the attribute name to create decorators (e.g.
+           validators), you still *must* assign `attr.ib` to them. Otherwise
+           Python will either not find the name or try to use the default
+           value to call e.g. ``validator`` on it.
+
+           These errors can be quite confusing and probably the most common
+           bug report on our bug tracker.
+
+    :param bool kw_only: Make all attributes keyword-only (Python 3+)
+        in the generated ``__init__`` (if ``init`` is ``False``, this
+        parameter is ignored).
+    :param bool cache_hash: Ensure that the object's hash code is computed
+        only once and stored on the object. If this is set to ``True``,
+        hashing must be either explicitly or implicitly enabled for this
+        class. If the hash code is cached, avoid any reassignments of
+        fields involved in hash code computation or mutations of the objects
+        those fields point to after object creation. If such changes occur,
+        the behavior of the object's hash code is undefined.
+    :param bool auto_exc: If the class subclasses `BaseException`
+        (which implicitly includes any subclass of any exception), the
+        following happens so that it behaves like a well-behaved Python
+        exception class:
+
+        - the values for *eq*, *order*, and *hash* are ignored and the
+          instances compare and hash by the instance's ids (N.B. ``attrs``
+          will *not* remove existing implementations of ``__hash__`` or the
+          equality methods. It just won't add its own.),
+        - all attributes that are either passed into ``__init__`` or have a
+          default value are additionally available as a tuple in the ``args``
+          attribute,
+        - the value of *str* is ignored leaving ``__str__`` to base classes.
+    :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
+        collects attributes from base classes. The default behavior is
+        incorrect in certain cases of multiple inheritance. It should be on by
+        default but is kept off for backward-compatibility.
+
+        See issue #428 for more details.
+
+    :param Optional[bool] getstate_setstate:
+        .. note::
+            This is usually only interesting for slotted classes and you
+            should probably just set *auto_detect* to `True`.
+
+        If `True`, ``__getstate__`` and
+        ``__setstate__`` are generated and attached to the class. This is
+        necessary for slotted classes to be pickleable. If left `None`, it's
+        `True` by default for slotted classes and ``False`` for dict classes.
+
+        If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+        and **either** ``__getstate__`` or ``__setstate__`` is detected
+        directly on the class (i.e. not inherited), it is set to `False`
+        (this is usually what you want).
+
+    :param on_setattr: A callable that is run whenever the user attempts to
+        set an attribute (either by assignment like ``i.x = 42`` or by using
+        `setattr` like ``setattr(i, "x", 42)``). It receives the same
+        arguments as validators: the instance, the attribute that is being
+        modified, and the new value.
+
+        If no exception is raised, the attribute is set to the return value of
+        the callable.
+
+        If a list of callables is passed, they're automatically wrapped in an
+        `attrs.setters.pipe`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    :param Optional[callable] field_transformer:
+        A function that is called with the original class object and all
+        fields right before ``attrs`` finalizes the class. You can use
+        this, e.g., to automatically add converters or validators to
+        fields based on their types. See `transform-fields` for more details.
+
+    :param bool match_args:
+        If `True` (default), set ``__match_args__`` on the class to support
+        :pep:`634` (Structural Pattern Matching). It is a tuple of all
+        non-keyword-only ``__init__`` parameter names on Python 3.10 and
+        later. Ignored on older Python versions.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+        *hash* supports ``None`` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+        If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+        `DeprecationWarning` if the classes compared are subclasses of
+        each other. ``__eq__`` and ``__ne__`` never tried to compare
+        subclasses to each other.
+    .. versionchanged:: 19.2.0
+        ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+        subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    ..
deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + """ + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + hash_ = hash # work around the lack of nonlocal + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + raise ValueError("Can't freeze a class with a custom __setattr__.") + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + if ( + hash_ is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + else: + hash = hash_ + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. 
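Both call forms that this dispatch supports, as a minimal sketch (public API, assuming upstream behavior):

import attr

@attr.s            # bare decorator: maybe_cls is the class, wrap(cls) runs at once
class A:
    x = attr.ib()

@attr.s()          # called with (): maybe_cls is None, so `wrap` itself is returned
class B:
    x = attr.ib()

assert attr.has(A) and attr.has(B)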
+    if maybe_cls is None:
+        return wrap
+    else:
+        return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+    """
+    Check whether *cls* has a frozen ancestor by looking at its
+    __setattr__.
+    """
+    return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+    """
+    Create a "filename" suitable for a function being generated.
+    """
+    unique_filename = "<attrs generated {} {}.{}>".format(
+        func_name,
+        cls.__module__,
+        getattr(cls, "__qualname__", cls.__name__),
+    )
+    return unique_filename
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+    attrs = tuple(
+        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+    )
+
+    tab = "        "
+
+    unique_filename = _generate_unique_filename(cls, "hash")
+    type_hash = hash(unique_filename)
+    # If eq is custom generated, we need to include the functions in globs
+    globs = {}
+
+    hash_def = "def __hash__(self"
+    hash_func = "hash(("
+    closing_braces = "))"
+    if not cache_hash:
+        hash_def += "):"
+    else:
+        hash_def += ", *"
+
+        hash_def += (
+            ", _cache_wrapper="
+            + "__import__('attr._make')._make._CacheHashWrapper):"
+        )
+        hash_func = "_cache_wrapper(" + hash_func
+        closing_braces += ")"
+
+    method_lines = [hash_def]
+
+    def append_hash_computation_lines(prefix, indent):
+        """
+        Generate the code for actually computing the hash code.
+        Below this will either be returned directly or used to compute
+        a value which is then cached, depending on the value of cache_hash
+        """
+
+        method_lines.extend(
+            [
+                indent + prefix + hash_func,
+                indent + "    %d," % (type_hash,),
+            ]
+        )
+
+        for a in attrs:
+            if a.eq_key:
+                cmp_name = "_%s_key" % (a.name,)
+                globs[cmp_name] = a.eq_key
+                method_lines.append(
+                    indent + "    %s(self.%s)," % (cmp_name, a.name)
+                )
+            else:
+                method_lines.append(indent + "    self.%s," % a.name)
+
+        method_lines.append(indent + "  " + closing_braces)
+
+    if cache_hash:
+        method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
+        if frozen:
+            append_hash_computation_lines(
+                "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
+            )
+            method_lines.append(tab * 2 + ")")  # close __setattr__
+        else:
+            append_hash_computation_lines(
+                "self.%s = " % _hash_cache_field, tab * 2
+            )
+        method_lines.append(tab + "return self.%s" % _hash_cache_field)
+    else:
+        append_hash_computation_lines("return ", tab)
+
+    script = "\n".join(method_lines)
+    return _make_method("__hash__", script, unique_filename, globs)
+
+
+def _add_hash(cls, attrs):
+    """
+    Add a hash method to *cls*.
+    """
+    cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+    return cls
+
+
+def _make_ne():
+    """
+    Create __ne__ method.
+    """
+
+    def __ne__(self, other):
+        """
+        Check equality and either forward a NotImplemented or
+        return the result negated.
+        """
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return not result
+
+    return __ne__
+
+
+def _make_eq(cls, attrs):
+    """
+    Create __eq__ method for *cls* with *attrs*.
+    """
+    attrs = [a for a in attrs if a.eq]
+
+    unique_filename = _generate_unique_filename(cls, "eq")
+    lines = [
+        "def __eq__(self, other):",
+        "    if other.__class__ is not self.__class__:",
+        "        return NotImplemented",
+    ]
+
+    # We can't just do a big self.x = other.x and... clause due to
+    # irregularities like nan == nan is false but (nan,) == (nan,) is true.
+ globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = "_%s_key" % (a.name,) + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + " %s(self.%s)," + % ( + cmp_name, + a.name, + ) + ) + others.append( + " %s(other.%s)," + % ( + cmp_name, + a.name, + ) + ) + else: + lines.append(" self.%s," % (a.name,)) + others.append(" other.%s," % (a.name,)) + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +if HAS_F_STRINGS: + + def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r + for name, r, _ in attr_names_with_reprs + if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name + if i + else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = ( + '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + ) + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + " return f'%s(%s)'" % (cls_name_fragment, repr_fragment), + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + +else: + + def _make_repr(attrs, ns, _): + """ + Make a repr method that includes relevant *attrs*, adding *ns* to the + full name. + """ + + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, repr if a.repr is True else a.repr) + for a in attrs + if a.repr is not False + ) + + def __repr__(self): + """ + Automatically created by attrs. + """ + try: + already_repring = _compat.repr_context.already_repring + except AttributeError: + already_repring = set() + _compat.repr_context.already_repring = already_repring + + if id(self) in already_repring: + return "..." + real_cls = self.__class__ + if ns is None: + class_name = real_cls.__qualname__.rsplit(">.", 1)[-1] + else: + class_name = ns + "." + real_cls.__name__ + + # Since 'self' remains on the stack (i.e.: strongly referenced) + # for the duration of this call, it's safe to depend on id(...) + # stability, and not need to track the instance and therefore + # worry about properties like weakref- or hash-ability. + already_repring.add(id(self)) + try: + result = [class_name, "("] + first = True + for name, attr_repr in attr_names_with_reprs: + if first: + first = False + else: + result.append(", ") + result.extend( + (name, "=", attr_repr(getattr(self, name, NOTHING))) + ) + return "".join(result) + ")" + finally: + already_repring.remove(id(self)) + + return __repr__ + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of ``attrs`` attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. 
+ """ + if not isinstance(cls, type): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of ``attrs`` attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: an ordered dict where keys are attribute names and values are + `attrs.Attribute`\\ s. This will be a `dict` if it's + naturally ordered like on Python 3.6+ or an + :class:`~collections.OrderedDict` otherwise. + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return ordered_dict((a.name, a) for a in attrs) + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with ``attrs`` attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_setattr"] = _obj_setattr + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. 
+ """ + return "_setattr(self, '%s', %s)" % (attr_name, value_var) + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr(self, '%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return "self.%s = %s" % (attr_name, value) + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return "_inst_dict['%s'] = %s" % (attr_name, value_var) + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. 
+ names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + arg_name = a.name.lstrip("_") + + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = "%s=NOTHING" % (arg_name,) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append("if %s is not NOTHING:" % (arg_name,)) + + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None: + # Try to get the type from the converter. + t = _AnnotationExtractor(a.converter).get_first_param_type() + if t: + annotations[arg_name] = t + + if attrs_to_validate: # we can skip this if there are no validators. 
+ names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append( + " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) + ) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr(self, '%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join("self." + a.name for a in attrs if a.init) + + lines.append("BaseException.__init__(self, %s)" % (vals,)) + + args = ", ".join(args) + if kw_only_args: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + return ( + """\ +def {init_name}(self, {args}): + {lines} +""".format( + init_name=("__attrs_init__" if attrs_init else "__init__"), + args=args, + lines="\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +class Attribute: + """ + *Read-only* representation of an attribute. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. 
+ hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict + ) + + # Don't use attr.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attr.evolve` but that function does not work + with ``Attribute``. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + types.MappingProxyType(dict(value)) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. 
Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + ) + ) + ( + Attribute( + name="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + """ + `Factory` is part of the default machinery so if we want a default + value here, we have to implement it ourselves. + """ + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. 
+ """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + + If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + raise TypeError("attrs argument must be a dict or a list.") + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. 
versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + # Get return type from last converter. + rt = _AnnotationExtractor(converters[-1]).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + return pipe_converter diff --git a/venv/lib/python3.12/site-packages/attr/_next_gen.py b/venv/lib/python3.12/site-packages/attr/_next_gen.py new file mode 100644 index 0000000000..5a06a74385 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_next_gen.py @@ -0,0 +1,220 @@ +# SPDX-License-Identifier: MIT + +""" +These are Python 3.6+-only and keyword-only APIs that call `attr.s` and +`attr.ib` with different default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + NOTHING, + _frozen_setattrs, + _ng_default_on_setattr, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + Define an ``attrs`` class. + + Differences to the classic `attr.s` that it uses underneath: + + - Automatically detect whether or not *auto_attribs* should be `True` (c.f. + *auto_attribs* parameter). + - If *frozen* is `False`, run converters and validators when setting an + attribute by default. + - *slots=True* + + .. caution:: + + Usually this has only upsides and few visible effects in everyday + programming. But it *can* lead to some suprising behaviors, so please + make sure to read :term:`slotted classes`. + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around for + backwards-compatibility have been removed. + + Please note that these are all defaults and you can change them as you + wish. + + :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves + exactly like `attr.s`. If left `None`, `attr.s` will try to guess: + + 1. If any attributes are annotated and no unannotated `attrs.fields`\ s + are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.fields`\ s. + + For now, please refer to `attr.s` for the rest of the parameters. + + .. versionadded:: 20.1.0 + .. 
versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + raise ValueError( + "Frozen classes can't use on_setattr " + "(frozen-ness was inherited)." + ) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. 
versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/venv/lib/python3.12/site-packages/attr/_version_info.py b/venv/lib/python3.12/site-packages/attr/_version_info.py new file mode 100644 index 0000000000..51a1312f97 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/venv/lib/python3.12/site-packages/attr/_version_info.pyi b/venv/lib/python3.12/site-packages/attr/_version_info.pyi new file mode 100644 index 0000000000..45ced08633 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/_version_info.pyi @@ -0,0 +1,9 @@ +class VersionInfo: + @property + def year(self) -> int: ... + @property + def minor(self) -> int: ... + @property + def micro(self) -> int: ... + @property + def releaselevel(self) -> str: ... diff --git a/venv/lib/python3.12/site-packages/attr/converters.py b/venv/lib/python3.12/site-packages/attr/converters.py new file mode 100644 index 0000000000..a73626c26d --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/converters.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. 
+ + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + raise ValueError("Cannot convert value to bool: {}".format(val)) diff --git a/venv/lib/python3.12/site-packages/attr/converters.pyi b/venv/lib/python3.12/site-packages/attr/converters.pyi new file mode 100644 index 0000000000..0f58088a37 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, Optional, TypeVar, overload + +from . import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... 
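The converter helpers in the hunk above are designed to compose via `pipe`. As a minimal usage sketch, assuming only the vendored attrs code shown in this diff (the `Config` class and its field names are hypothetical, not part of the change):

    import attr
    from attr.converters import default_if_none, optional, pipe, to_bool

    @attr.s
    class Config:  # hypothetical example class, not part of this diff
        # None falls back to "false", then to_bool coerces the string.
        debug = attr.ib(
            default=None, converter=pipe(default_if_none("false"), to_bool)
        )
        # optional(int) passes None through and coerces everything else.
        retries = attr.ib(default=None, converter=optional(int))

    assert Config().debug is False
    assert Config(debug="yes").debug is True
    assert Config(retries="3").retries == 3

Converters run inside the generated `__init__`, so they also normalize defaults; that is why `Config().debug` is already a real bool rather than the string "false".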
diff --git a/venv/lib/python3.12/site-packages/attr/exceptions.py b/venv/lib/python3.12/site-packages/attr/exceptions.py new file mode 100644 index 0000000000..5dc51e0a82 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/exceptions.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: MIT + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An ``attrs`` function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-``attrs`` class has been passed into an ``attrs`` function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set using ``attr.ib()`` and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type + annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an ``attrs`` feature that requires a newer Python + version. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A ``attr.ib()`` requiring a callable has been set with a value + that is not callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/venv/lib/python3.12/site-packages/attr/exceptions.pyi b/venv/lib/python3.12/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000000..f2680118b4 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/venv/lib/python3.12/site-packages/attr/filters.py b/venv/lib/python3.12/site-packages/attr/filters.py new file mode 100644 index 0000000000..baa25e9465 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/filters.py @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. 
+ """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. + :type what: `list` of `type` or `attrs.Attribute`\\ s + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def include_(attribute, value): + return value.__class__ in cls or attribute in attrs + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes or `attrs.Attribute`\\ s. + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def exclude_(attribute, value): + return value.__class__ not in cls and attribute not in attrs + + return exclude_ diff --git a/venv/lib/python3.12/site-packages/attr/filters.pyi b/venv/lib/python3.12/site-packages/attr/filters.pyi new file mode 100644 index 0000000000..993866865e --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any, Union + +from . import Attribute, _FilterType + +def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... +def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/venv/lib/python3.12/site-packages/attr/py.typed b/venv/lib/python3.12/site-packages/attr/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/venv/lib/python3.12/site-packages/attr/setters.py b/venv/lib/python3.12/site-packages/attr/setters.py new file mode 100644 index 0000000000..12ed6750df --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/setters.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. +NO_OP = object() diff --git a/venv/lib/python3.12/site-packages/attr/setters.pyi b/venv/lib/python3.12/site-packages/attr/setters.pyi new file mode 100644 index 0000000000..3f5603c2b0 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/setters.pyi @@ -0,0 +1,19 @@ +from typing import Any, NewType, NoReturn, TypeVar, cast + +from . import Attribute, _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... 
+def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
+
+# convert is allowed to return Any, because they can be chained using pipe.
+def convert(
+ instance: Any, attribute: Attribute[Any], new_value: Any
+) -> Any: ...
+
+_NoOpType = NewType("_NoOpType", object)
+NO_OP: _NoOpType
diff --git a/venv/lib/python3.12/site-packages/attr/validators.py b/venv/lib/python3.12/site-packages/attr/validators.py
new file mode 100644
index 0000000000..eece517da8
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/attr/validators.py
@@ -0,0 +1,594 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful validators.
+"""
+
+
+import operator
+import re
+
+from contextlib import contextmanager
+
+from ._config import get_run_validators, set_run_validators
+from ._make import _AndValidator, and_, attrib, attrs
+from .exceptions import NotCallableError
+
+
+try:
+ Pattern = re.Pattern
+except AttributeError: # Python <3.7 lacks a Pattern type.
+ Pattern = type(re.compile(""))
+
+
+__all__ = [
+ "and_",
+ "deep_iterable",
+ "deep_mapping",
+ "disabled",
+ "ge",
+ "get_disabled",
+ "gt",
+ "in_",
+ "instance_of",
+ "is_callable",
+ "le",
+ "lt",
+ "matches_re",
+ "max_len",
+ "min_len",
+ "optional",
+ "provides",
+ "set_disabled",
+]
+
+
+def set_disabled(disabled):
+ """
+ Globally disable or enable running validators.
+
+ By default, they are run.
+
+ :param disabled: If ``True``, disable running all validators.
+ :type disabled: bool
+
+ .. warning::
+
+ This function is not thread-safe!
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(not disabled)
+
+
+def get_disabled():
+ """
+ Return a bool indicating whether validators are currently disabled or not.
+
+ :return: ``True`` if validators are currently disabled.
+ :rtype: bool
+
+ .. versionadded:: 21.3.0
+ """
+ return not get_run_validators()
+
+
+@contextmanager
+def disabled():
+ """
+ Context manager that disables running validators within its context.
+
+ .. warning::
+
+ This context manager is not thread-safe!
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(False)
+ try:
+ yield
+ finally:
+ set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+ type = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not isinstance(value, self.type):
+ raise TypeError(
+ "'{name}' must be {type!r} (got {value!r} that is a "
+ "{actual!r}).".format(
+ name=attr.name,
+ type=self.type,
+ actual=value.__class__,
+ value=value,
+ ),
+ attr,
+ self.type,
+ value,
+ )
+
+ def __repr__(self):
+ return "<instance_of validator for type {type!r}>".format(
+ type=self.type
+ )
+
+
+def instance_of(type):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+ with a wrong type for this particular attribute (checks are performed using
+ `isinstance` therefore it's also valid to pass a tuple of types).
+
+ :param type: The type to check for.
+ :type type: type or tuple of types
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected type, and the value it
+ got.
+ """
+ return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+ pattern = attrib()
+ match_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """ + if not self.match_func(value): + raise ValueError( + "'{name}' must match regex {pattern!r}" + " ({value!r} doesn't)".format( + name=attr.name, pattern=self.pattern.pattern, value=value + ), + attr, + self.pattern, + value, + ) + + def __repr__(self): + return "".format( + pattern=self.pattern + ) + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called + with a string that doesn't match *regex*. + + :param regex: a regex string or precompiled pattern to match against + :param int flags: flags that will be passed to the underlying re function + (default 0) + :param callable func: which underlying `re` function to call. Valid options + are `re.fullmatch`, `re.search`, and `re.match`; the default ``None`` + means `re.fullmatch`. For performance reasons, the pattern is always + precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. + """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + raise ValueError( + "'func' must be one of {}.".format( + ", ".join( + sorted( + e and e.__name__ or "None" for e in set(valid_funcs) + ) + ) + ) + ) + + if isinstance(regex, Pattern): + if flags: + raise TypeError( + "'flags' can only be used with a string pattern; " + "pass flags to re.compile() instead" + ) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, hash=True) +class _ProvidesValidator: + interface = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.interface.providedBy(value): + raise TypeError( + "'{name}' must provide {interface!r} which {value!r} " + "doesn't.".format( + name=attr.name, interface=self.interface, value=value + ), + attr, + self.interface, + value, + ) + + def __repr__(self): + return "".format( + interface=self.interface + ) + + +def provides(interface): + """ + A validator that raises a `TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param interface: The interface to check for. + :type interface: ``zope.interface.Interface`` + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected interface, and the + value it got. + """ + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return "".format( + what=repr(self.validator) + ) + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. + + :param validator: A validator (or a list of validators) that is used for + non-``None`` values. + :type validator: callable or `list` of callables. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. 
+ """ + if isinstance(validator, list): + return _OptionalValidator(_AndValidator(validator)) + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator: + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})".format( + name=attr.name, options=self.options, value=value + ), + attr, + self.options, + value, + ) + + def __repr__(self): + return "".format( + options=self.options + ) + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, `enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type `attrs.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + """ + return _InValidator(options) + + +@attrs(repr=False, slots=False, hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "" + + +def is_callable(): + """ + A validator that raises a `attr.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. + + .. versionadded:: 19.1.0 + + :raises `attr.exceptions.NotCallableError`: With a human readable error + message containing the attribute (`attrs.Attribute`) name, + and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else " {iterable!r}".format(iterable=self.iterable_validator) + ) + return ( + "" + ).format( + iterable_identifier=iterable_identifier, + member=self.member_validator, + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + :param member_validator: Validator(s) to apply to iterable members + :param iterable_validator: Validator to apply to iterable itself + (optional) + + .. 
+
+ :raises TypeError: if any sub-validators fail
+ """
+ if isinstance(member_validator, (list, tuple)):
+ member_validator = and_(*member_validator)
+ return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping:
+ key_validator = attrib(validator=is_callable())
+ value_validator = attrib(validator=is_callable())
+ mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.mapping_validator is not None:
+ self.mapping_validator(inst, attr, value)
+
+ for key in value:
+ self.key_validator(inst, attr, key)
+ self.value_validator(inst, attr, value[key])
+
+ def __repr__(self):
+ return (
+ "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+ ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+ """
+ A validator that performs deep validation of a dictionary.
+
+ :param key_validator: Validator to apply to dictionary keys
+ :param value_validator: Validator to apply to dictionary values
+ :param mapping_validator: Validator to apply to top-level mapping
+ attribute (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
+ """
+ return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+ bound = attrib()
+ compare_op = attrib()
+ compare_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.compare_func(value, self.bound):
+ raise ValueError(
+ "'{name}' must be {op} {bound}: {value}".format(
+ name=attr.name,
+ op=self.compare_op,
+ bound=self.bound,
+ value=value,
+ )
+ )
+
+ def __repr__(self):
+ return "<Validator for x {op} {bound}>".format(
+ op=self.compare_op, bound=self.bound
+ )
+
+
+def lt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number larger or equal to *val*.
+
+ :param val: Exclusive upper bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number greater than *val*.
+
+ :param val: Inclusive upper bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number smaller than *val*.
+
+ :param val: Inclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number smaller or equal to *val*.
+
+ :param val: Exclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+ max_length = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """ + if len(value) > self.max_length: + raise ValueError( + "Length of '{name}' must be <= {max}: {len}".format( + name=attr.name, max=self.max_length, len=len(value) + ) + ) + + def __repr__(self): + return "".format(max=self.max_length) + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + :param int length: Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + raise ValueError( + "Length of '{name}' must be => {min}: {len}".format( + name=attr.name, min=self.min_length, len=len(value) + ) + ) + + def __repr__(self): + return "".format(min=self.min_length) + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + :param int length: Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) diff --git a/venv/lib/python3.12/site-packages/attr/validators.pyi b/venv/lib/python3.12/site-packages/attr/validators.pyi new file mode 100644 index 0000000000..54b9dba24e --- /dev/null +++ b/venv/lib/python3.12/site-packages/attr/validators.pyi @@ -0,0 +1,80 @@ +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + List, + Mapping, + Match, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +from . import _ValidatorType +from . import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2]] +) -> _ValidatorType[Union[_T1, _T2]]: ... +@overload +def instance_of( + type: Tuple[Type[_T1], Type[_T2], Type[_T3]] +) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... +@overload +def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Union[Pattern[AnyStr], AnyStr], + flags: int = ..., + func: Optional[ + Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] + ] = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: Optional[_ValidatorType[_I]] = ..., +) -> _ValidatorType[_I]: ... 
+def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: Optional[_ValidatorType[_M]] = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/AUTHORS.rst b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/AUTHORS.rst new file mode 100644 index 0000000000..aa677e81d1 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/AUTHORS.rst @@ -0,0 +1,11 @@ +Credits +======= + +``attrs`` is written and maintained by `Hynek Schlawack `_. + +The development is kindly supported by `Variomedia AG `_. + +A full list of contributors can be found in `GitHub's overview `_. + +It’s the spiritual successor of `characteristic `_ and aspires to fix some of it clunkiness and unfortunate decisions. +Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/INSTALLER b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/INSTALLER new file mode 100644 index 0000000000..a1b589e38a --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/LICENSE b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/LICENSE new file mode 100644 index 0000000000..2bd6453d25 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
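The number- and length-bound validators implemented above plug into ``attr.ib()``'s *validator* argument. Below is a minimal usage sketch against the attrs 22.1.0 API vendored here; the ``Reading`` class and its fields are invented for illustration:

```
import attr
from attr.validators import deep_mapping, ge, instance_of, lt, max_len

@attr.s
class Reading:
    # _NumberValidator bounds: 0 <= value < 100; the list is and_()-combined.
    value = attr.ib(validator=[ge(0), lt(100)])
    # _MaxLengthValidator: a string of at most 10 characters.
    label = attr.ib(validator=[instance_of(str), max_len(10)])
    # _DeepMapping: str keys mapped to int values.
    tags = attr.ib(
        factory=dict,
        validator=deep_mapping(
            key_validator=instance_of(str),
            value_validator=instance_of(int),
        ),
    )

Reading(value=42, label="ok", tags={"a": 1})  # validates cleanly
# Reading(value=120, label="ok", tags={})     # ValueError: 'value' must be < 100: 120
```

Each validator runs at ``__init__`` time (and on ``attr.validate()``); failures surface as the ``ValueError``/``TypeError`` messages formatted in the classes above.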
diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/METADATA b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/METADATA new file mode 100644 index 0000000000..60b6653e31 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/METADATA @@ -0,0 +1,240 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 22.1.0 +Summary: Classes Without Boilerplate +Home-page: https://www.attrs.org/ +Author: Hynek Schlawack +Author-email: hs@ox.cx +Maintainer: Hynek Schlawack +Maintainer-email: hs@ox.cx +License: MIT +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues +Project-URL: Source Code, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Project-URL: Ko-fi, https://ko-fi.com/the_hynek +Keywords: class,attribute,boilerplate +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Natural Language :: English +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.5 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: AUTHORS.rst +Provides-Extra: dev +Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'dev' +Requires-Dist: hypothesis ; extra == 'dev' +Requires-Dist: pympler ; extra == 'dev' +Requires-Dist: pytest (>=4.3.0) ; extra == 'dev' +Requires-Dist: mypy (!=0.940,>=0.900) ; extra == 'dev' +Requires-Dist: pytest-mypy-plugins ; extra == 'dev' +Requires-Dist: zope.interface ; extra == 'dev' +Requires-Dist: furo ; extra == 'dev' +Requires-Dist: sphinx ; extra == 'dev' +Requires-Dist: sphinx-notfound-page ; extra == 'dev' +Requires-Dist: pre-commit ; extra == 'dev' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: zope.interface ; extra == 'docs' +Requires-Dist: sphinx-notfound-page ; extra == 'docs' +Provides-Extra: tests +Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests' +Requires-Dist: hypothesis ; extra == 'tests' +Requires-Dist: pympler ; extra == 'tests' +Requires-Dist: pytest (>=4.3.0) ; extra == 'tests' +Requires-Dist: mypy (!=0.940,>=0.900) ; extra == 'tests' +Requires-Dist: pytest-mypy-plugins ; extra == 'tests' +Requires-Dist: zope.interface ; extra == 'tests' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests' +Provides-Extra: tests_no_zope +Requires-Dist: coverage[toml] (>=5.0.2) ; extra == 'tests_no_zope' +Requires-Dist: hypothesis ; extra 
== 'tests_no_zope' +Requires-Dist: pympler ; extra == 'tests_no_zope' +Requires-Dist: pytest (>=4.3.0) ; extra == 'tests_no_zope' +Requires-Dist: mypy (!=0.940,>=0.900) ; extra == 'tests_no_zope' +Requires-Dist: pytest-mypy-plugins ; extra == 'tests_no_zope' +Requires-Dist: cloudpickle ; (platform_python_implementation == "CPython") and extra == 'tests_no_zope' + + +.. image:: https://www.attrs.org/en/stable/_static/attrs_logo.png + :alt: attrs logo + :align: center + + +``attrs`` is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka `dunder methods `_). +`Trusted by NASA `_ for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + +.. teaser-end + +For that, it gives you a class decorator and a way to declaratively define the attributes on that class: + +.. -code-begin- + +.. code-block:: pycon + + >>> from attrs import asdict, define, make_class, Factory + + >>> @define + ... class SomeClass: + ... a_number: int = 42 + ... list_of_numbers: list[int] = Factory(list) + ... + ... def hard_math(self, another_number): + ... return self.a_number + sum(self.list_of_numbers) * another_number + + + >>> sc = SomeClass(1, [1, 2, 3]) + >>> sc + SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + + >>> sc.hard_math(3) + 19 + >>> sc == SomeClass(1, [1, 2, 3]) + True + >>> sc != SomeClass(2, [3, 2, 1]) + True + + >>> asdict(sc) + {'a_number': 1, 'list_of_numbers': [1, 2, 3]} + + >>> SomeClass() + SomeClass(a_number=42, list_of_numbers=[]) + + >>> C = make_class("C", ["a", "b"]) + >>> C("foo", "bar") + C(a='foo', b='bar') + + +After *declaring* your attributes, ``attrs`` gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable ``__repr__``, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +**Hate type annotations**!? +No problem! +Types are entirely **optional** with ``attrs``. +Simply assign ``attrs.field()`` to the attributes instead of annotating them with types. + +---- + +This example uses ``attrs``'s modern APIs that have been introduced in version 20.1.0, and the ``attrs`` package import name that has been added in version 21.3.0. +The classic APIs (``@attr.s``, ``attr.ib``, plus their serious-business aliases) and the ``attr`` package import name will remain **indefinitely**. + +Please check out `On The Core API Names `_ for a more in-depth explanation. + + +Data Classes +============ + +On the tin, ``attrs`` might remind you of ``dataclasses`` (and indeed, ``dataclasses`` `are a descendant `_ of ``attrs``). +In practice it does a lot more and is more flexible. +For instance it allows you to define `special handling of NumPy arrays for equality checks `_, or allows more ways to `plug into the initialization process `_. + +For more details, please refer to our `comparison page `_. + +.. 
-project-information- + +Project Information +=================== + +- **License**: `MIT `_ +- **PyPI**: https://pypi.org/project/attrs/ +- **Source Code**: https://github.com/python-attrs/attrs +- **Documentation**: https://www.attrs.org/ +- **Changelog**: https://www.attrs.org/en/stable/changelog.html +- **Get Help**: please use the ``python-attrs`` tag on `StackOverflow `_ +- **Third-party Extensions**: https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs +- **Supported Python Versions**: 3.5 and later (last 2.7-compatible release is `21.4.0 `_) + +If you'd like to contribute to ``attrs`` you're most welcome and we've written `a little guide `_ to get you started! + + +``attrs`` for Enterprise +------------------------ + +Available as part of the Tidelift Subscription. + +The maintainers of ``attrs`` and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. +`Learn more. `_ + + +Release Information +=================== + +22.1.0 (2022-07-28) +------------------- + +Backwards-incompatible Changes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Python 2.7 is not supported anymore. + + Dealing with Python 2.7 tooling has become too difficult for a volunteer-run project. + + We have supported Python 2 more than 2 years after it was officially discontinued and feel that we have paid our dues. + All version up to 21.4.0 from December 2021 remain fully functional, of course. + `#936 `_ +- The deprecated ``cmp`` attribute of ``attrs.Attribute`` has been removed. + This does not affect the *cmp* argument to ``attr.s`` that can be used as a shortcut to set *eq* and *order* at the same time. + `#939 `_ + + +Changes +^^^^^^^ + +- Instantiation of frozen slotted classes is now faster. + `#898 `_ +- If an ``eq`` key is defined, it is also used before hashing the attribute. + `#909 `_ +- Added ``attrs.validators.min_len()``. + `#916 `_ +- ``attrs.validators.deep_iterable()``'s *member_validator* argument now also accepts a list of validators and wraps them in an ``attrs.validators.and_()``. + `#925 `_ +- Added missing type stub re-imports for ``attrs.converters`` and ``attrs.filters``. + `#931 `_ +- Added missing stub for ``attr(s).cmp_using()``. + `#949 `_ +- ``attrs.validators._in()``'s ``ValueError`` is not missing the attribute, expected options, and the value it got anymore. + `#951 `_ +- Python 3.11 is now officially supported. + `#969 `_ + +`Full changelog `_. + +Credits +======= + +``attrs`` is written and maintained by `Hynek Schlawack `_. + +The development is kindly supported by `Variomedia AG `_. + +A full list of contributors can be found in `GitHub's overview `_. + +It’s the spiritual successor of `characteristic `_ and aspires to fix some of it clunkiness and unfortunate decisions. +Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? 
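Two of the 22.1.0 changelog entries above (``min_len()`` and the list form of ``deep_iterable()``'s *member_validator*) are easiest to grasp from a usage sketch; the ``Batch`` class below is invented for illustration:

```
import attr
from attr.validators import deep_iterable, instance_of, min_len

@attr.s
class Batch:
    # As of 22.1.0 the list is wrapped in attrs.validators.and_() automatically.
    names = attr.ib(
        validator=deep_iterable(
            member_validator=[instance_of(str), min_len(1)],
            iterable_validator=instance_of(list),
        )
    )

Batch(names=["alpha", "beta"])  # passes
# Batch(names=["alpha", ""])    # ValueError: Length of 'names' must be >= 1: 0
```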
diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/RECORD b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/RECORD new file mode 100644 index 0000000000..0b446daa2d --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/RECORD @@ -0,0 +1,57 @@ +attr/__init__.py,sha256=KZjj88xCd_tH-W59HR1EvHiYAUi4Zd1dzOxx8P77jeI,1602 +attr/__init__.pyi,sha256=t-1r-I1VnyxFrqic__QxIk1HUc3ag53L2b8lv0xDZTw,15137 +attr/__pycache__/__init__.cpython-312.pyc,, +attr/__pycache__/_cmp.cpython-312.pyc,, +attr/__pycache__/_compat.cpython-312.pyc,, +attr/__pycache__/_config.cpython-312.pyc,, +attr/__pycache__/_funcs.cpython-312.pyc,, +attr/__pycache__/_make.cpython-312.pyc,, +attr/__pycache__/_next_gen.cpython-312.pyc,, +attr/__pycache__/_version_info.cpython-312.pyc,, +attr/__pycache__/converters.cpython-312.pyc,, +attr/__pycache__/exceptions.cpython-312.pyc,, +attr/__pycache__/filters.cpython-312.pyc,, +attr/__pycache__/setters.cpython-312.pyc,, +attr/__pycache__/validators.cpython-312.pyc,, +attr/_cmp.py,sha256=Mmqj-6w71g_vx0TTLLkU4pbmv28qz_FyBGcUb1HX9ZE,4102 +attr/_cmp.pyi,sha256=cSlVvIH4As2NlDIoLglPEbSrBMWVVTpwExVDDBH6pn8,357 +attr/_compat.py,sha256=Qr9kZOu95Og7joPaQiXoPb54epKqxNU39Xu0u4QVGZI,5568 +attr/_config.py,sha256=5W8lgRePuIOWu1ZuqF1899e2CmXGc95-ipwTpF1cEU4,826 +attr/_funcs.py,sha256=XTFKZtd5zxsUvWocBw7VAswTxH-nFHk0H8gJ2JQkxD4,14645 +attr/_make.py,sha256=Srxbhb8kB17T6nP9e_dgcXY72zda9xfL5uJzva6LExY,97569 +attr/_next_gen.py,sha256=N0Gb5WdBHfcfQhcUsLAc_2ZYzl0JtAX1NOHuDgvkecE,5882 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=TWCfmCAxk8s2tgTSYnyQv9MRDPf1pk8Rj16KO_Xqe1c,3610 +attr/converters.pyi,sha256=MQo7iEzPNVoFpKqD30sVwgVpdNoIeSCF2nsXvoxLZ-Y,416 +attr/exceptions.py,sha256=ZGEMLv0CDY1TOtj49OF84myejOn-LCCXAKGIKalKkVU,1915 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=aZep54h8-4ZYV5lmZ3Dx2mqeQH4cMx6tuCmCylLNbEU,1038 +attr/filters.pyi,sha256=_Sm80jGySETX_Clzdkon5NHVjQWRl3Y3liQKZX1czXc,215 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400 +attr/setters.pyi,sha256=7dM10rqpQVDW0y-iJUnq8rabdO5Wx2Sbo5LwNa0IXl0,573 +attr/validators.py,sha256=cpOHMNSt02ApbTQtQAwBTMeWZqp0u_sx-e3xH-jTyR4,16793 +attr/validators.pyi,sha256=6ngbvnWnFSkbf5J2dK86pq2xpEtrwzWTKfJ4aUvSIlk,2355 +attrs-22.1.0.dist-info/AUTHORS.rst,sha256=jau5p7JMPbBJeCCpGBWsRj8zpxUVAhpyoHFJRfjM888,743 +attrs-22.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-22.1.0.dist-info/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs-22.1.0.dist-info/METADATA,sha256=vwSMK_BbEgVHrgFWpj3iW0PISTMPHzi6qham9jg7LtA,11015 +attrs-22.1.0.dist-info/RECORD,, +attrs-22.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs-22.1.0.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 +attrs-22.1.0.dist-info/top_level.txt,sha256=AGbmKnOtYpdkLRsDRQVSBIwfL32pAQ6BSo1mt-BxI7M,11 +attrs/__init__.py,sha256=CeyxLGVViAEKKsLOLaif8vF3vs1a28vsrRVLv7eMEgM,1109 +attrs/__init__.pyi,sha256=vuFxNbulP9Q7hfpO6Lb5A-_0mbEJOiwYyefjzXMqVfs,2100 +attrs/__pycache__/__init__.cpython-312.pyc,, +attrs/__pycache__/converters.cpython-312.pyc,, +attrs/__pycache__/exceptions.cpython-312.pyc,, +attrs/__pycache__/filters.cpython-312.pyc,, +attrs/__pycache__/setters.cpython-312.pyc,, 
+attrs/__pycache__/validators.cpython-312.pyc,, +attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70 +attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70 +attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67 +attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70 diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/REQUESTED b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/REQUESTED new file mode 100644 index 0000000000..e69de29bb2 diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/WHEEL b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/WHEEL new file mode 100644 index 0000000000..0b18a28110 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/top_level.txt b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/top_level.txt new file mode 100644 index 0000000000..eca8ba9f00 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs-22.1.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +attr +attrs diff --git a/venv/lib/python3.12/site-packages/attrs/__init__.py b/venv/lib/python3.12/site-packages/attrs/__init__.py new file mode 100644 index 0000000000..a704b8b56b --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/__init__.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + Factory, + __author__, + __copyright__, + __description__, + __doc__, + __email__, + __license__, + __title__, + __url__, + __version__, + __version_info__, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] diff --git a/venv/lib/python3.12/site-packages/attrs/__init__.pyi b/venv/lib/python3.12/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000000..fc44de46a0 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/__init__.pyi @@ -0,0 +1,66 @@ +from typing import ( + Any, + Callable, + Dict, + Mapping, + Optional, + Sequence, + Tuple, + Type, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. 
+from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import _FilterType +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import define as define +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import field as field +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import frozen as frozen +from attr import has as has +from attr import make_class as make_class +from attr import mutable as mutable +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators + +# TODO: see definition of attr.asdict/astuple +def asdict( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: bool = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... 
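The stub above types the ``attrs``-namespace re-exports, including the ``asdict``/``astuple`` entry points. A small sketch of the modern API it describes (the ``Point`` class is invented):

```
from attrs import asdict, astuple, define, field

@define
class Point:
    x: int
    y: int = field(default=0)

p = Point(3)
print(asdict(p))   # {'x': 3, 'y': 0}
print(astuple(p))  # (3, 0)
```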
diff --git a/venv/lib/python3.12/site-packages/attrs/converters.py b/venv/lib/python3.12/site-packages/attrs/converters.py new file mode 100644 index 0000000000..edfa8d3c16 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa diff --git a/venv/lib/python3.12/site-packages/attrs/exceptions.py b/venv/lib/python3.12/site-packages/attrs/exceptions.py new file mode 100644 index 0000000000..bd9efed202 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa diff --git a/venv/lib/python3.12/site-packages/attrs/filters.py b/venv/lib/python3.12/site-packages/attrs/filters.py new file mode 100644 index 0000000000..52959005b0 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa diff --git a/venv/lib/python3.12/site-packages/attrs/py.typed b/venv/lib/python3.12/site-packages/attrs/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/venv/lib/python3.12/site-packages/attrs/setters.py b/venv/lib/python3.12/site-packages/attrs/setters.py new file mode 100644 index 0000000000..9b50770804 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa diff --git a/venv/lib/python3.12/site-packages/attrs/validators.py b/venv/lib/python3.12/site-packages/attrs/validators.py new file mode 100644 index 0000000000..ab2c9b3024 --- /dev/null +++ b/venv/lib/python3.12/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/INSTALLER b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/INSTALLER new file mode 100644 index 0000000000..a1b589e38a --- /dev/null +++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/METADATA b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/METADATA new file mode 100644 index 0000000000..eddfd55f05 --- /dev/null +++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/METADATA @@ -0,0 +1,123 @@ +Metadata-Version: 2.4 +Name: beautifulsoup4 +Version: 4.13.4 +Summary: Screen-scraping library +Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/ +Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/ +Author-email: Leonard Richardson +License: MIT License +License-File: AUTHORS +License-File: LICENSE +Keywords: HTML,XML,parse,soup +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Topic :: Text Processing :: Markup :: SGML +Classifier: Topic :: Text Processing :: Markup :: XML +Requires-Python: >=3.7.0 +Requires-Dist: soupsieve>1.2 +Requires-Dist: typing-extensions>=4.0.0 +Provides-Extra: cchardet +Requires-Dist: cchardet; extra == 'cchardet' +Provides-Extra: chardet 
+Requires-Dist: chardet; extra == 'chardet'
+Provides-Extra: charset-normalizer
+Requires-Dist: charset-normalizer; extra == 'charset-normalizer'
+Provides-Extra: html5lib
+Requires-Dist: html5lib; extra == 'html5lib'
+Provides-Extra: lxml
+Requires-Dist: lxml; extra == 'lxml'
+Description-Content-Type: text/markdown
+
+Beautiful Soup is a library that makes it easy to scrape information
+from web pages. It sits atop an HTML or XML parser, providing Pythonic
+idioms for iterating, searching, and modifying the parse tree.
+
+# Quick start
+
+```
+>>> from bs4 import BeautifulSoup
+>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
+>>> print(soup.prettify())
+<html>
+ <body>
+  <p>
+   Some
+   <b>
+    bad
+    <i>
+     HTML
+    </i>
+   </b>
+  </p>
+ </body>
+</html>
+>>> soup.find(string="bad")
+'bad'
+>>> soup.i
+<i>HTML</i>
+#
+>>> soup = BeautifulSoup("<tag1>Some<b>bad<i>XML", "xml")
+#
+>>> print(soup.prettify())
+<?xml version="1.0" encoding="utf-8"?>
+<tag1>
+ Some
+ <b>
+  bad
+  <i>
+   XML
+  </i>
+ </b>
+</tag1>
+```
+
+To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/).
+
+# Links
+
+* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/)
+* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
+* [Discussion group](https://groups.google.com/group/beautifulsoup/)
+* [Development](https://code.launchpad.net/beautifulsoup/)
+* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/)
+* [Complete changelog](https://git.launchpad.net/beautifulsoup/tree/CHANGELOG)
+
+# Note on Python 2 sunsetting
+
+Beautiful Soup's support for Python 2 was discontinued on December 31,
+2020: one year after the sunset date for Python 2 itself. From this
+point onward, new Beautiful Soup development will exclusively target
+Python 3. The final release of Beautiful Soup 4 to support Python 2
+was 4.9.3.
+
+# Supporting the project
+
+If you use Beautiful Soup as part of your professional work, please consider a
+[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme).
+This will support many of the free software projects your organization
+depends on, not just Beautiful Soup.
+
+If you use Beautiful Soup for personal projects, the best way to say
+thank you is to read
+[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I
+wrote about what Beautiful Soup has taught me about software
+development.
+
+# Building the documentation
+
+The bs4/doc/ directory contains full documentation in Sphinx
+format. Run `make html` in that directory to create HTML
+documentation.
+ +# Running the unit tests + +Beautiful Soup supports unit test discovery using Pytest: + +``` +$ pytest +``` + diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/RECORD b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/RECORD new file mode 100644 index 0000000000..20e94c9c4a --- /dev/null +++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/RECORD @@ -0,0 +1,89 @@ +beautifulsoup4-4.13.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +beautifulsoup4-4.13.4.dist-info/METADATA,sha256=zXwPMgMEIdymEV_lAL0QzP34i6osem_P2nfqgaEexRo,3809 +beautifulsoup4-4.13.4.dist-info/RECORD,, +beautifulsoup4-4.13.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +beautifulsoup4-4.13.4.dist-info/licenses/AUTHORS,sha256=uYkjiRjh_aweRnF8tAW2PpJJeickE68NmJwd9siry28,2201 +beautifulsoup4-4.13.4.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441 +bs4/__init__.py,sha256=ZkHAdXHt3PEkwDrx_Z4ABuz-T_-TSO8aL6Vpy9uiI8M,44212 +bs4/__pycache__/__init__.cpython-312.pyc,, +bs4/__pycache__/_deprecation.cpython-312.pyc,, +bs4/__pycache__/_typing.cpython-312.pyc,, +bs4/__pycache__/_warnings.cpython-312.pyc,, +bs4/__pycache__/css.cpython-312.pyc,, +bs4/__pycache__/dammit.cpython-312.pyc,, +bs4/__pycache__/diagnose.cpython-312.pyc,, +bs4/__pycache__/element.cpython-312.pyc,, +bs4/__pycache__/exceptions.cpython-312.pyc,, +bs4/__pycache__/filter.cpython-312.pyc,, +bs4/__pycache__/formatter.cpython-312.pyc,, +bs4/_deprecation.py,sha256=ucZjfBAUF1B0f5ldNIIhlkHsYjHtvwELWlE3_pAR6Vs,2394 +bs4/_typing.py,sha256=3FgPPPrdsTa-kvn1R36o1k_2SfilcUWm4M9i7G4qFl8,7118 +bs4/_warnings.py,sha256=ZuOETgcnEbZgw2N0nnNXn6wvtrn2ut7AF0d98bvkMFc,4711 +bs4/builder/__init__.py,sha256=TYAKmGFuVfTsI53reHijcZKETnPuvse57KZ6LsZsJRo,31130 +bs4/builder/__pycache__/__init__.cpython-312.pyc,, +bs4/builder/__pycache__/_html5lib.cpython-312.pyc,, +bs4/builder/__pycache__/_htmlparser.cpython-312.pyc,, +bs4/builder/__pycache__/_lxml.cpython-312.pyc,, +bs4/builder/_html5lib.py,sha256=3MXq29SYg9XoS9gu2hgTDU02IQkv8kIBx3rW1QWY3fg,22846 +bs4/builder/_htmlparser.py,sha256=cu9PFkxkqVIIe9nU3fVy-JJhINEhY8cGbsuCwZCnQCA,17872 +bs4/builder/_lxml.py,sha256=9L5pJ7ekI-G3HhOhoxcpjZPqDMRfUoWj4Nnku6B2uR8,18545 +bs4/css.py,sha256=XGQq7HQUDyYEbDorFMGIGek7QGPiFuZYnvNEQ59GyxM,12685 +bs4/dammit.py,sha256=oHd1elJ44kMobBGSQRuG7Wln6M-BLz1unOuUscaL9h0,51472 +bs4/diagnose.py,sha256=zy7_GPQHsTtNf8s10WWIRcC5xH5_8LKs295Aa7iFUyI,7832 +bs4/element.py,sha256=7w9cLT9oTU8lrWm9nnlPqHpa_v4U8UuyIUsWnTHYLbw,109768 +bs4/exceptions.py,sha256=Q9FOadNe8QRvzDMaKSXe2Wtl8JK_oAZW7mbFZBVP_GE,951 +bs4/filter.py,sha256=8lzv5yE0mw7jhYwPyDiFCfXAB4-wDiIgn7m8fmIKcGA,29031 +bs4/formatter.py,sha256=5O4gBxTTi5TLU6TdqsgYI9Io0Gc_6-oCAWpfHI3Thn0,10464 +bs4/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +bs4/tests/__init__.py,sha256=ieOudaXvOWA_g4Gb25adCnhEabtEpRwf9_DdplXKdXQ,52671 +bs4/tests/__pycache__/__init__.cpython-312.pyc,, +bs4/tests/__pycache__/test_builder.cpython-312.pyc,, +bs4/tests/__pycache__/test_builder_registry.cpython-312.pyc,, +bs4/tests/__pycache__/test_css.cpython-312.pyc,, +bs4/tests/__pycache__/test_dammit.cpython-312.pyc,, +bs4/tests/__pycache__/test_element.cpython-312.pyc,, +bs4/tests/__pycache__/test_filter.cpython-312.pyc,, +bs4/tests/__pycache__/test_formatter.cpython-312.pyc,, +bs4/tests/__pycache__/test_fuzz.cpython-312.pyc,, +bs4/tests/__pycache__/test_html5lib.cpython-312.pyc,, +bs4/tests/__pycache__/test_htmlparser.cpython-312.pyc,, 
+bs4/tests/__pycache__/test_lxml.cpython-312.pyc,, +bs4/tests/__pycache__/test_navigablestring.cpython-312.pyc,, +bs4/tests/__pycache__/test_pageelement.cpython-312.pyc,, +bs4/tests/__pycache__/test_soup.cpython-312.pyc,, +bs4/tests/__pycache__/test_tag.cpython-312.pyc,, +bs4/tests/__pycache__/test_tree.cpython-312.pyc,, +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase,sha256=yUdXkbpNK7LVOQ0LBHMoqZ1rWaBfSXWytoO_xdSm7Ho,15 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase,sha256=Uv_dx4a43TSfoNkjU-jHW2nSXkqHFg4XdAw7SWVObUk,23 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase,sha256=OEyVA0Ej4FxswOElrUNt0In4s4YhrmtaxE_NHGZvGtg,30 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase,sha256=G4vpNBOz-RwMpi6ewEgNEa13zX0sXhmL7VHOyIcdKVQ,15347 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase,sha256=3d8z65o4p7Rur-RmCHoOjzqaYQ8EAtjmiBYTHNyAdl4,19469 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase,sha256=NfGIlit1k40Ip3mlnBkYOkIDJX6gHtjlErwl7gsBjAQ,12 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase,sha256=xy4i1U0nhFHcnyc5pRKS6JRMvuoCNUur-Scor6UxIGw,4317 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase,sha256=Q-UTYpQBUsWoMgIUspUlzveSI-41s4ABC3jajRb-K0o,11502 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase,sha256=2bq3S8KxZgk8EajLReHD8m4_0Lj_nrkyJAxB_z_U0D0,5 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase,sha256=MZDu31LPLfgu6jP9IZkrlwNes3f_sL8WFP5BChkUKdY,35 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase,sha256=w58r-s6besG5JwPXpnz37W2YTj9-_qxFbk6hiEnKeIQ,51495 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase,sha256=q8rkdMECEXKcqVhOf5zWHkSBTQeOPt0JiLg2TZiPCuk,10380 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase,sha256=QfzoOxKwNuqG-4xIrea6MOQLXhfAAOQJ0r9u-J6kSNs,19 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase,sha256=MJ2pHFuuCQUiQz1Kor2sof7LWeRERQ6QK43YNqQHg9o,47 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase,sha256=EItOpSdeD4ewK-qgJ9vtxennwn_huguzXgctrUT7fqE,3546 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase,sha256=a2aJTG4FceGSJXsjtxoS8S4jk_8rZsS3aznLkeO2_dY,124 +bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase,sha256=jRFRtCKlP3-3EDLc_iVRTcE6JNymv0rYcVM6qRaPrxI,2607 +bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase,sha256=7NsdCiXWAhNkmoW1pvF7rbZExyLAQIWtDtSHXIsH6YU,103 +bs4/tests/test_builder.py,sha256=BBMBirb4mb-fVdJj4dxQCxrdcjQeulKSKBFrPFVpVOk,1095 +bs4/tests/test_builder_registry.py,sha256=tpJ5Niva_cF49SdzIb1gMo0W4Tiodr8BYSOE3O6P_g8,5064 +bs4/tests/test_css.py,sha256=T_HCMzpe6hTr8d2YFXm0DScr8gT8d6h0MYlhZfo6A4U,18625 +bs4/tests/test_dammit.py,sha256=TQCVe6kKVYSuYjwTtIvIaOYYmWYPMnR_3PK45kimLg4,17840 +bs4/tests/test_element.py,sha256=u7FbTtKE6pYJetD1PgS3fCU1-QQXfB7GaLwfI3s4ROY,4373 +bs4/tests/test_filter.py,sha256=JzvU3eI4r1QyNIHKqhxbZ-M7q5qgw-S7Y73MGI2tNr4,27395 +bs4/tests/test_formatter.py,sha256=a6TaeNOVeg_ZYseiP7atmFyYJkQJqlk-jlVxMlyJC2o,6943 +bs4/tests/test_fuzz.py,sha256=zyaoWgCt8hnRkXecBYM9x91fI_Ao9eQUcsBi76ooJ08,7123 
+bs4/tests/test_html5lib.py,sha256=ljMOAds__k9zhfT4jVnxxhZkLEggaT7wqDexzDNwus4,9206 +bs4/tests/test_htmlparser.py,sha256=iDHEI69GcisNP48BeHdLAWlqPGhrBwxftnUM8_3nsR4,6662 +bs4/tests/test_lxml.py,sha256=4fZIsNVbm2zdRQFNNwD-lqwf_QtUtiU4QbtLXISQZBw,7453 +bs4/tests/test_navigablestring.py,sha256=5-vs86ujXcs6LjT1lPYQHLi2ag3eBtFfnrOrjzV665M,5599 +bs4/tests/test_pageelement.py,sha256=lAw-sVP3zJX0VdHXXN1Ia3tci5dgK10Gac5o9G46IIk,16195 +bs4/tests/test_soup.py,sha256=I-mhNheo2-PTvfJToDI43EO4RmGlpKJsYOS19YoQ7-8,22669 +bs4/tests/test_tag.py,sha256=ue32hxQs_a1cMuzyu7MNjK42t0IOGMA6POPLIArMOts,9690 +bs4/tests/test_tree.py,sha256=Iw2e2spk19PEtCObE0ymgJH4n4F5_KbMvpcOeblHBsI,55074 diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/WHEEL b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/WHEEL new file mode 100644 index 0000000000..12228d414b --- /dev/null +++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/AUTHORS b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/AUTHORS new file mode 100644 index 0000000000..18926c29de --- /dev/null +++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/AUTHORS @@ -0,0 +1,49 @@ +Behold, mortal, the origins of Beautiful Soup... +================================================ + +Leonard Richardson is the primary maintainer. + +Aaron DeVore, Isaac Muse and Chris Papademetrious have made +significant contributions to the code base. + +Mark Pilgrim provided the encoding detection code that forms the base +of UnicodeDammit. + +Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful +Soup 4 working under Python 3. + +Simon Willison wrote soupselect, which was used to make Beautiful Soup +support CSS selectors. Isaac Muse wrote SoupSieve, which made it +possible to _remove_ the CSS selector code from Beautiful Soup. + +Sam Ruby helped with a lot of edge cases. + +Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his +work in solving the nestable tags conundrum. 
+
+An incomplete list of people who have contributed patches to Beautiful
+Soup:
+
+ Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
+Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
+Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
+Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
+Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
+Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
+Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
+Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
+Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
+"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
+Wiseman, Paul Wright, Danny Yoo
+
+An incomplete list of people who made suggestions or found bugs or
+found ways to break Beautiful Soup:
+
+ Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
+ Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
+ Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
+ warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
+ Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
+ Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
+ Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
+ Sousa Rocha, Yichun Wei, Per Vognsen
diff --git a/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/LICENSE b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/LICENSE
new file mode 100644
index 0000000000..08e3a9cf8c
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/beautifulsoup4-4.13.4.dist-info/licenses/LICENSE
@@ -0,0 +1,31 @@
+Beautiful Soup is made available under the MIT license:
+
+ Copyright (c) Leonard Richardson
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+Beautiful Soup incorporates code from the html5lib library, which is
+also made available under the MIT license. Copyright (c) James Graham
+and other contributors
+
+Beautiful Soup has an optional dependency on the soupsieve library,
+which is also made available under the MIT license.
Copyright (c) +Isaac Muse diff --git a/venv/lib/python3.12/site-packages/bs4/__init__.py b/venv/lib/python3.12/site-packages/bs4/__init__.py new file mode 100644 index 0000000000..324fcfadf4 --- /dev/null +++ b/venv/lib/python3.12/site-packages/bs4/__init__.py @@ -0,0 +1,1170 @@ +"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend". + +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup uses a pluggable XML or HTML parser to parse a +(possibly invalid) document into a tree representation. Beautiful Soup +provides methods and Pythonic idioms that make it easy to navigate, +search, and modify the parse tree. + +Beautiful Soup works with Python 3.7 and up. It works better if lxml +and/or html5lib is installed, but they are not required. + +For more than you ever wanted to know about Beautiful Soup, see the +documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ +""" + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "4.13.4" +__copyright__ = "Copyright (c) 2004-2025 Leonard Richardson" +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +__all__ = [ + "AttributeResemblesVariableWarning", + "BeautifulSoup", + "Comment", + "Declaration", + "ProcessingInstruction", + "ResultSet", + "CSS", + "Script", + "Stylesheet", + "Tag", + "TemplateString", + "ElementFilter", + "UnicodeDammit", + "CData", + "Doctype", + + # Exceptions + "FeatureNotFound", + "ParserRejectedMarkup", + "StopParsing", + + # Warnings + "AttributeResemblesVariableWarning", + "GuessedAtParserWarning", + "MarkupResemblesLocatorWarning", + "UnusualUsageWarning", + "XMLParsedAsHTMLWarning", +] + +from collections import Counter +import sys +import warnings + +# The very first thing we do is give a useful error if someone is +# running this code under Python 2. +if sys.version_info.major < 3: + raise ImportError( + "You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3." + ) + +from .builder import ( + builder_registry, + TreeBuilder, +) +from .builder._htmlparser import HTMLParserTreeBuilder +from .dammit import UnicodeDammit +from .css import CSS +from ._deprecation import ( + _deprecated, +) +from .element import ( + CData, + Comment, + DEFAULT_OUTPUT_ENCODING, + Declaration, + Doctype, + NavigableString, + PageElement, + ProcessingInstruction, + PYTHON_SPECIFIC_ENCODINGS, + ResultSet, + Script, + Stylesheet, + Tag, + TemplateString, +) +from .formatter import Formatter +from .filter import ( + ElementFilter, + SoupStrainer, +) +from typing import ( + Any, + cast, + Counter as CounterType, + Dict, + Iterator, + List, + Sequence, + Optional, + Type, + Union, +) + +from bs4._typing import ( + _Encoding, + _Encodings, + _IncomingMarkup, + _InsertableElement, + _RawAttributeValue, + _RawAttributeValues, + _RawMarkup, +) + +# Import all warnings and exceptions into the main package. +from bs4.exceptions import ( + FeatureNotFound, + ParserRejectedMarkup, + StopParsing, +) +from bs4._warnings import ( + AttributeResemblesVariableWarning, + GuessedAtParserWarning, + MarkupResemblesLocatorWarning, + UnusualUsageWarning, + XMLParsedAsHTMLWarning, +) + + +class BeautifulSoup(Tag): + """A data structure representing a parsed HTML or XML document. + + Most of the methods you'll call on a BeautifulSoup object are inherited from + PageElement or Tag. 
+ + Internally, this class defines the basic interface called by the + tree builders when converting an HTML/XML document into a data + structure. The interface abstracts away the differences between + parsers. To write a new tree builder, you'll need to understand + these methods as a whole. + + These methods will be called by the BeautifulSoup constructor: + * reset() + * feed(markup) + + The tree builder may call these methods from its feed() implementation: + * handle_starttag(name, attrs) # See note about return value + * handle_endtag(name) + * handle_data(data) # Appends to the current data node + * endData(containerClass) # Ends the current data node + + No matter how complicated the underlying parser is, you should be + able to build a tree using 'start tag' events, 'end tag' events, + 'data' events, and "done with data" events. + + If you encounter an empty-element tag (aka a self-closing tag, + like HTML's
<br> tag), call handle_starttag and then
+    handle_endtag.
+    """
+
+    #: Since `BeautifulSoup` subclasses `Tag`, it's possible to treat it as
+    #: a `Tag` with a `Tag.name`. However, this name makes it clear the
+    #: `BeautifulSoup` object isn't a real markup tag.
+    ROOT_TAG_NAME: str = "[document]"
+
+    #: If the end-user gives no indication which tree builder they
+    #: want, look for one with these features.
+    DEFAULT_BUILDER_FEATURES: Sequence[str] = ["html", "fast"]
+
+    #: A string containing all ASCII whitespace characters, used
+    #: during parsing to detect data chunks that seem 'empty'.
+    ASCII_SPACES: str = "\x20\x0a\x09\x0c\x0d"
+
+    # FUTURE PYTHON:
+    element_classes: Dict[Type[PageElement], Type[PageElement]]  #: :meta private:
+    builder: TreeBuilder  #: :meta private:
+    is_xml: bool
+    known_xml: Optional[bool]
+    parse_only: Optional[SoupStrainer]  #: :meta private:
+
+    # These members are only used while parsing markup.
+    markup: Optional[_RawMarkup]  #: :meta private:
+    current_data: List[str]  #: :meta private:
+    currentTag: Optional[Tag]  #: :meta private:
+    tagStack: List[Tag]  #: :meta private:
+    open_tag_counter: CounterType[str]  #: :meta private:
+    preserve_whitespace_tag_stack: List[Tag]  #: :meta private:
+    string_container_stack: List[Tag]  #: :meta private:
+    _most_recent_element: Optional[PageElement]  #: :meta private:
+
+    #: Beautiful Soup's best guess as to the character encoding of the
+    #: original document.
+    original_encoding: Optional[_Encoding]
+
+    #: The character encoding, if any, that was explicitly defined
+    #: in the original document. This may or may not match
+    #: `BeautifulSoup.original_encoding`.
+    declared_html_encoding: Optional[_Encoding]
+
+    #: This is True if the markup that was parsed contains
+    #: U+FFFD REPLACEMENT_CHARACTER characters which were not present
+    #: in the original markup. These mark character sequences that
+    #: could not be represented in Unicode.
+    contains_replacement_characters: bool
+
+    def __init__(
+        self,
+        markup: _IncomingMarkup = "",
+        features: Optional[Union[str, Sequence[str]]] = None,
+        builder: Optional[Union[TreeBuilder, Type[TreeBuilder]]] = None,
+        parse_only: Optional[SoupStrainer] = None,
+        from_encoding: Optional[_Encoding] = None,
+        exclude_encodings: Optional[_Encodings] = None,
+        element_classes: Optional[Dict[Type[PageElement], Type[PageElement]]] = None,
+        **kwargs: Any,
+    ):
+        """Constructor.
+
+        :param markup: A string or a file-like object representing
+         markup to be parsed.
+
+        :param features: Desirable features of the parser to be
+         used. This may be the name of a specific parser ("lxml",
+         "lxml-xml", "html.parser", or "html5lib") or it may be the
+         type of markup to be used ("html", "html5", "xml"). It's
+         recommended that you name a specific parser, so that
+         Beautiful Soup gives you the same results across platforms
+         and virtual environments.
+
+        :param builder: A TreeBuilder subclass to instantiate (or
+         instance to use) instead of looking one up based on
+         `features`. You only need to use this if you've implemented a
+         custom TreeBuilder.
+
+        :param parse_only: A SoupStrainer. Only parts of the document
+         matching the SoupStrainer will be considered. This is useful
+         when parsing part of a document that would otherwise be too
+         large to fit into memory.
+
+        :param from_encoding: A string indicating the encoding of the
+         document to be parsed. Pass this in if Beautiful Soup is
+         guessing wrongly about the document's encoding.
+ + :param exclude_encodings: A list of strings indicating + encodings known to be wrong. Pass this in if you don't know + the document's encoding but you know Beautiful Soup's guess is + wrong. + + :param element_classes: A dictionary mapping BeautifulSoup + classes like Tag and NavigableString, to other classes you'd + like to be instantiated instead as the parse tree is + built. This is useful for subclassing Tag or NavigableString + to modify default behavior. + + :param kwargs: For backwards compatibility purposes, the + constructor accepts certain keyword arguments used in + Beautiful Soup 3. None of these arguments do anything in + Beautiful Soup 4; they will result in a warning and then be + ignored. + + Apart from this, any keyword arguments passed into the + BeautifulSoup constructor are propagated to the TreeBuilder + constructor. This makes it possible to configure a + TreeBuilder by passing in arguments, not just by saying which + one to use. + """ + if "convertEntities" in kwargs: + del kwargs["convertEntities"] + warnings.warn( + "BS4 does not respect the convertEntities argument to the " + "BeautifulSoup constructor. Entities are always converted " + "to Unicode characters." + ) + + if "markupMassage" in kwargs: + del kwargs["markupMassage"] + warnings.warn( + "BS4 does not respect the markupMassage argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for any necessary markup massage." + ) + + if "smartQuotesTo" in kwargs: + del kwargs["smartQuotesTo"] + warnings.warn( + "BS4 does not respect the smartQuotesTo argument to the " + "BeautifulSoup constructor. Smart quotes are always converted " + "to Unicode characters." + ) + + if "selfClosingTags" in kwargs: + del kwargs["selfClosingTags"] + warnings.warn( + "Beautiful Soup 4 does not respect the selfClosingTags argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for understanding self-closing tags." + ) + + if "isHTML" in kwargs: + del kwargs["isHTML"] + warnings.warn( + "Beautiful Soup 4 does not respect the isHTML argument to the " + "BeautifulSoup constructor. Suggest you use " + "features='lxml' for HTML and features='lxml-xml' for " + "XML." + ) + + def deprecated_argument(old_name: str, new_name: str) -> Optional[Any]: + if old_name in kwargs: + warnings.warn( + 'The "%s" argument to the BeautifulSoup constructor ' + 'was renamed to "%s" in Beautiful Soup 4.0.0' + % (old_name, new_name), + DeprecationWarning, + stacklevel=3, + ) + return kwargs.pop(old_name) + return None + + parse_only = parse_only or deprecated_argument("parseOnlyThese", "parse_only") + if parse_only is not None: + # Issue a warning if we can tell in advance that + # parse_only will exclude the entire tree. + if parse_only.excludes_everything: + warnings.warn( + f"The given value for parse_only will exclude everything: {parse_only}", + UserWarning, + stacklevel=3, + ) + + from_encoding = from_encoding or deprecated_argument( + "fromEncoding", "from_encoding" + ) + + if from_encoding and isinstance(markup, str): + warnings.warn( + "You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored." + ) + from_encoding = None + + self.element_classes = element_classes or dict() + + # We need this information to track whether or not the builder + # was specified well enough that we can omit the 'you need to + # specify a parser' warning. 
+ original_builder = builder + original_features = features + + builder_class: Type[TreeBuilder] + if isinstance(builder, type): + # A builder class was passed in; it needs to be instantiated. + builder_class = builder + builder = None + elif builder is None: + if isinstance(features, str): + features = [features] + if features is None or len(features) == 0: + features = self.DEFAULT_BUILDER_FEATURES + possible_builder_class = builder_registry.lookup(*features) + if possible_builder_class is None: + raise FeatureNotFound( + "Couldn't find a tree builder with the features you " + "requested: %s. Do you need to install a parser library?" + % ",".join(features) + ) + builder_class = possible_builder_class + + # At this point either we have a TreeBuilder instance in + # builder, or we have a builder_class that we can instantiate + # with the remaining **kwargs. + if builder is None: + builder = builder_class(**kwargs) + if ( + not original_builder + and not ( + original_features == builder.NAME + or ( + isinstance(original_features, str) + and original_features in builder.ALTERNATE_NAMES + ) + ) + and markup + ): + # The user did not tell us which TreeBuilder to use, + # and we had to guess. Issue a warning. + if builder.is_xml: + markup_type = "XML" + else: + markup_type = "HTML" + + # This code adapted from warnings.py so that we get the same line + # of code as our warnings.warn() call gets, even if the answer is wrong + # (as it may be in a multithreading situation). + caller = None + try: + caller = sys._getframe(1) + except ValueError: + pass + if caller: + globals = caller.f_globals + line_number = caller.f_lineno + else: + globals = sys.__dict__ + line_number = 1 + filename = globals.get("__file__") + if filename: + fnl = filename.lower() + if fnl.endswith((".pyc", ".pyo")): + filename = filename[:-1] + if filename: + # If there is no filename at all, the user is most likely in a REPL, + # and the warning is not necessary. + values = dict( + filename=filename, + line_number=line_number, + parser=builder.NAME, + markup_type=markup_type, + ) + warnings.warn( + GuessedAtParserWarning.MESSAGE % values, + GuessedAtParserWarning, + stacklevel=2, + ) + else: + if kwargs: + warnings.warn( + "Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`." + ) + + self.builder = builder + self.is_xml = builder.is_xml + self.known_xml = self.is_xml + self._namespaces = dict() + self.parse_only = parse_only + + if hasattr(markup, "read"): # It's a file-type object. + markup = markup.read() + elif not isinstance(markup, (bytes, str)) and not hasattr(markup, "__len__"): + raise TypeError( + f"Incoming markup is of an invalid type: {markup!r}. Markup must be a string, a bytestring, or an open filehandle." + ) + elif len(markup) <= 256 and ( + (isinstance(markup, bytes) and b"<" not in markup and b"\n" not in markup) + or (isinstance(markup, str) and "<" not in markup and "\n" not in markup) + ): + # Issue warnings for a couple beginner problems + # involving passing non-markup to Beautiful Soup. + # Beautiful Soup will still parse the input as markup, + # since that is sometimes the intended behavior. + if not self._markup_is_url(markup): + self._markup_resembles_filename(markup) + + # At this point we know markup is a string or bytestring. If + # it was a file-type object, we've read from it. 
+ markup = cast(_RawMarkup, markup) + + rejections = [] + success = False + for ( + self.markup, + self.original_encoding, + self.declared_html_encoding, + self.contains_replacement_characters, + ) in self.builder.prepare_markup( + markup, from_encoding, exclude_encodings=exclude_encodings + ): + self.reset() + self.builder.initialize_soup(self) + try: + self._feed() + success = True + break + except ParserRejectedMarkup as e: + rejections.append(e) + pass + + if not success: + other_exceptions = [str(e) for e in rejections] + raise ParserRejectedMarkup( + "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + + "\n ".join(other_exceptions) + ) + + # Clear out the markup and remove the builder's circular + # reference to this object. + self.markup = None + self.builder.soup = None + + def copy_self(self) -> "BeautifulSoup": + """Create a new BeautifulSoup object with the same TreeBuilder, + but not associated with any markup. + + This is the first step of the deepcopy process. + """ + clone = type(self)("", None, self.builder) + + # Keep track of the encoding of the original document, + # since we won't be parsing it again. + clone.original_encoding = self.original_encoding + return clone + + def __getstate__(self) -> Dict[str, Any]: + # Frequently a tree builder can't be pickled. + d = dict(self.__dict__) + if "builder" in d and d["builder"] is not None and not self.builder.picklable: + d["builder"] = type(self.builder) + # Store the contents as a Unicode string. + d["contents"] = [] + d["markup"] = self.decode() + + # If _most_recent_element is present, it's a Tag object left + # over from initial parse. It might not be picklable and we + # don't need it. + if "_most_recent_element" in d: + del d["_most_recent_element"] + return d + + def __setstate__(self, state: Dict[str, Any]) -> None: + # If necessary, restore the TreeBuilder by looking it up. + self.__dict__ = state + if isinstance(self.builder, type): + self.builder = self.builder() + elif not self.builder: + # We don't know which builder was used to build this + # parse tree, so use a default we know is always available. + self.builder = HTMLParserTreeBuilder() + self.builder.soup = self + self.reset() + self._feed() + + @classmethod + @_deprecated( + replaced_by="nothing (private method, will be removed)", version="4.13.0" + ) + def _decode_markup(cls, markup: _RawMarkup) -> str: + """Ensure `markup` is Unicode so it's safe to send into warnings.warn. + + warnings.warn had this problem back in 2010 but fortunately + not anymore. This has not been used for a long time; I just + noticed that fact while working on 4.13.0. + """ + if isinstance(markup, bytes): + decoded = markup.decode("utf-8", "replace") + else: + decoded = markup + return decoded + + @classmethod + def _markup_is_url(cls, markup: _RawMarkup) -> bool: + """Error-handling method to raise a warning if incoming markup looks + like a URL. + + :param markup: A string of markup. + :return: Whether or not the markup resembled a URL + closely enough to justify issuing a warning. 
+ """ + problem: bool = False + if isinstance(markup, bytes): + problem = ( + any(markup.startswith(prefix) for prefix in (b"http:", b"https:")) + and b" " not in markup + ) + elif isinstance(markup, str): + problem = ( + any(markup.startswith(prefix) for prefix in ("http:", "https:")) + and " " not in markup + ) + else: + return False + + if not problem: + return False + warnings.warn( + MarkupResemblesLocatorWarning.URL_MESSAGE % dict(what="URL"), + MarkupResemblesLocatorWarning, + stacklevel=3, + ) + return True + + @classmethod + def _markup_resembles_filename(cls, markup: _RawMarkup) -> bool: + """Error-handling method to issue a warning if incoming markup + resembles a filename. + + :param markup: A string of markup. + :return: Whether or not the markup resembled a filename + closely enough to justify issuing a warning. + """ + markup_b: bytes + + # We're only checking ASCII characters, so rather than write + # the same tests twice, convert Unicode to a bytestring and + # operate on the bytestring. + if isinstance(markup, str): + markup_b = markup.encode("utf8") + else: + markup_b = markup + + # Step 1: does it end with a common textual file extension? + filelike = False + lower = markup_b.lower() + extensions = [b".html", b".htm", b".xml", b".xhtml", b".txt"] + if any(lower.endswith(ext) for ext in extensions): + filelike = True + if not filelike: + return False + + # Step 2: it _might_ be a file, but there are a few things + # we can look for that aren't very common in filenames. + + # Characters that have special meaning to Unix shells. (< was + # excluded before this method was called.) + # + # Many of these are also reserved characters that cannot + # appear in Windows filenames. + for byte in markup_b: + if byte in b"?*#&;>$|": + return False + + # Two consecutive forward slashes (as seen in a URL) or two + # consecutive spaces (as seen in fixed-width data). + # + # (Paths to Windows network shares contain consecutive + # backslashes, so checking that doesn't seem as helpful.) + if b"//" in markup_b: + return False + if b" " in markup_b: + return False + + # A colon in any position other than position 1 (e.g. after a + # Windows drive letter). + if markup_b.startswith(b":"): + return False + colon_i = markup_b.rfind(b":") + if colon_i not in (-1, 1): + return False + + # Step 3: If it survived all of those checks, it's similar + # enough to a file to justify issuing a warning. + warnings.warn( + MarkupResemblesLocatorWarning.FILENAME_MESSAGE % dict(what="filename"), + MarkupResemblesLocatorWarning, + stacklevel=3, + ) + return True + + def _feed(self) -> None: + """Internal method that parses previously set markup, creating a large + number of Tag and NavigableString objects. + """ + # Convert the document to Unicode. + self.builder.reset() + + if self.markup is not None: + self.builder.feed(self.markup) + # Close out any unfinished strings and close all the open tags. + self.endData() + while ( + self.currentTag is not None and self.currentTag.name != self.ROOT_TAG_NAME + ): + self.popTag() + + def reset(self) -> None: + """Reset this object to a state as though it had never parsed any + markup. 
+ """ + Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) + self.hidden = True + self.builder.reset() + self.current_data = [] + self.currentTag = None + self.tagStack = [] + self.open_tag_counter = Counter() + self.preserve_whitespace_tag_stack = [] + self.string_container_stack = [] + self._most_recent_element = None + self.pushTag(self) + + def new_tag( + self, + name: str, + namespace: Optional[str] = None, + nsprefix: Optional[str] = None, + attrs: Optional[_RawAttributeValues] = None, + sourceline: Optional[int] = None, + sourcepos: Optional[int] = None, + string: Optional[str] = None, + **kwattrs: _RawAttributeValue, + ) -> Tag: + """Create a new Tag associated with this BeautifulSoup object. + + :param name: The name of the new Tag. + :param namespace: The URI of the new Tag's XML namespace, if any. + :param prefix: The prefix for the new Tag's XML namespace, if any. + :param attrs: A dictionary of this Tag's attribute values; can + be used instead of ``kwattrs`` for attributes like 'class' + that are reserved words in Python. + :param sourceline: The line number where this tag was + (purportedly) found in its source document. + :param sourcepos: The character position within ``sourceline`` where this + tag was (purportedly) found. + :param string: String content for the new Tag, if any. + :param kwattrs: Keyword arguments for the new Tag's attribute values. + + """ + attr_container = self.builder.attribute_dict_class(**kwattrs) + if attrs is not None: + attr_container.update(attrs) + tag_class = self.element_classes.get(Tag, Tag) + + # Assume that this is either Tag or a subclass of Tag. If not, + # the user brought type-unsafety upon themselves. + tag_class = cast(Type[Tag], tag_class) + tag = tag_class( + None, + self.builder, + name, + namespace, + nsprefix, + attr_container, + sourceline=sourceline, + sourcepos=sourcepos, + ) + + if string is not None: + tag.string = string + return tag + + def string_container( + self, base_class: Optional[Type[NavigableString]] = None + ) -> Type[NavigableString]: + """Find the class that should be instantiated to hold a given kind of + string. + + This may be a built-in Beautiful Soup class or a custom class passed + in to the BeautifulSoup constructor. + """ + container = base_class or NavigableString + + # The user may want us to use some other class (hopefully a + # custom subclass) instead of the one we'd use normally. + container = cast( + Type[NavigableString], self.element_classes.get(container, container) + ) + + # On top of that, we may be inside a tag that needs a special + # container class. + if self.string_container_stack and container is NavigableString: + container = self.builder.string_containers.get( + self.string_container_stack[-1].name, container + ) + return container + + def new_string( + self, s: str, subclass: Optional[Type[NavigableString]] = None + ) -> NavigableString: + """Create a new `NavigableString` associated with this `BeautifulSoup` + object. + + :param s: The string content of the `NavigableString` + :param subclass: The subclass of `NavigableString`, if any, to + use. If a document is being processed, an appropriate + subclass for the current location in the document will + be determined automatically. 
+ """ + container = self.string_container(subclass) + return container(s) + + def insert_before(self, *args: _InsertableElement) -> List[PageElement]: + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError( + "BeautifulSoup objects don't support insert_before()." + ) + + def insert_after(self, *args: _InsertableElement) -> List[PageElement]: + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError("BeautifulSoup objects don't support insert_after().") + + def popTag(self) -> Optional[Tag]: + """Internal method called by _popToTag when a tag is closed. + + :meta private: + """ + if not self.tagStack: + # Nothing to pop. This shouldn't happen. + return None + tag = self.tagStack.pop() + if tag.name in self.open_tag_counter: + self.open_tag_counter[tag.name] -= 1 + if ( + self.preserve_whitespace_tag_stack + and tag == self.preserve_whitespace_tag_stack[-1] + ): + self.preserve_whitespace_tag_stack.pop() + if self.string_container_stack and tag == self.string_container_stack[-1]: + self.string_container_stack.pop() + # print("Pop", tag.name) + if self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag: Tag) -> None: + """Internal method called by handle_starttag when a tag is opened. + + :meta private: + """ + # print("Push", tag.name) + if self.currentTag is not None: + self.currentTag.contents.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + if tag.name != self.ROOT_TAG_NAME: + self.open_tag_counter[tag.name] += 1 + if tag.name in self.builder.preserve_whitespace_tags: + self.preserve_whitespace_tag_stack.append(tag) + if tag.name in self.builder.string_containers: + self.string_container_stack.append(tag) + + def endData(self, containerClass: Optional[Type[NavigableString]] = None) -> None: + """Method called by the TreeBuilder when the end of a data segment + occurs. + + :param containerClass: The class to use when incorporating the + data segment into the parse tree. + + :meta private: + """ + if self.current_data: + current_data = "".join(self.current_data) + # If whitespace is not preserved, and this string contains + # nothing but ASCII spaces, replace it with a single space + # or newline. + if not self.preserve_whitespace_tag_stack: + strippable = True + for i in current_data: + if i not in self.ASCII_SPACES: + strippable = False + break + if strippable: + if "\n" in current_data: + current_data = "\n" + else: + current_data = " " + + # Reset the data collector. + self.current_data = [] + + # Should we add this string to the tree at all? + if ( + self.parse_only + and len(self.tagStack) <= 1 + and (not self.parse_only.allow_string_creation(current_data)) + ): + return + + containerClass = self.string_container(containerClass) + o = containerClass(current_data) + self.object_was_parsed(o) + + def object_was_parsed( + self, + o: PageElement, + parent: Optional[Tag] = None, + most_recent_element: Optional[PageElement] = None, + ) -> None: + """Method called by the TreeBuilder to integrate an object into the + parse tree. 
+ + :meta private: + """ + if parent is None: + parent = self.currentTag + assert parent is not None + previous_element: Optional[PageElement] + if most_recent_element is not None: + previous_element = most_recent_element + else: + previous_element = self._most_recent_element + + next_element = previous_sibling = next_sibling = None + if isinstance(o, Tag): + next_element = o.next_element + next_sibling = o.next_sibling + previous_sibling = o.previous_sibling + if previous_element is None: + previous_element = o.previous_element + + fix = parent.next_element is not None + + o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) + + self._most_recent_element = o + parent.contents.append(o) + + # Check if we are inserting into an already parsed node. + if fix: + self._linkage_fixer(parent) + + def _linkage_fixer(self, el: Tag) -> None: + """Make sure linkage of this fragment is sound.""" + + first = el.contents[0] + child = el.contents[-1] + descendant: PageElement = child + + if child is first and el.parent is not None: + # Parent should be linked to first child + el.next_element = child + # We are no longer linked to whatever this element is + prev_el = child.previous_element + if prev_el is not None and prev_el is not el: + prev_el.next_element = None + # First child should be linked to the parent, and no previous siblings. + child.previous_element = el + child.previous_sibling = None + + # We have no sibling as we've been appended as the last. + child.next_sibling = None + + # This index is a tag, dig deeper for a "last descendant" + if isinstance(child, Tag) and child.contents: + # _last_decendant is typed as returning Optional[PageElement], + # but the value can't be None here, because el is a Tag + # which we know has contents. + descendant = cast(PageElement, child._last_descendant(False)) + + # As the final step, link last descendant. It should be linked + # to the parent's next sibling (if found), else walk up the chain + # and find a parent with a sibling. It should have no next sibling. + descendant.next_element = None + descendant.next_sibling = None + + target: Optional[Tag] = el + while True: + if target is None: + break + elif target.next_sibling is not None: + descendant.next_element = target.next_sibling + target.next_sibling.previous_element = child + break + target = target.parent + + def _popToTag( + self, name: str, nsprefix: Optional[str] = None, inclusivePop: bool = True + ) -> Optional[Tag]: + """Pops the tag stack up to and including the most recent + instance of the given tag. + + If there are no open tags with the given name, nothing will be + popped. + + :param name: Pop up to the most recent tag with this name. + :param nsprefix: The namespace prefix that goes with `name`. + :param inclusivePop: It this is false, pops the tag stack up + to but *not* including the most recent instqance of the + given tag. + + :meta private: + """ + # print("Popping to %s" % name) + if name == self.ROOT_TAG_NAME: + # The BeautifulSoup object itself can never be popped. 
+ return None + + most_recently_popped = None + + stack_size = len(self.tagStack) + for i in range(stack_size - 1, 0, -1): + if not self.open_tag_counter.get(name): + break + t = self.tagStack[i] + if name == t.name and nsprefix == t.prefix: + if inclusivePop: + most_recently_popped = self.popTag() + break + most_recently_popped = self.popTag() + + return most_recently_popped + + def handle_starttag( + self, + name: str, + namespace: Optional[str], + nsprefix: Optional[str], + attrs: _RawAttributeValues, + sourceline: Optional[int] = None, + sourcepos: Optional[int] = None, + namespaces: Optional[Dict[str, str]] = None, + ) -> Optional[Tag]: + """Called by the tree builder when a new tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + :param attrs: A dictionary of attribute values. Note that + attribute values are expected to be simple strings; processing + of multi-valued attributes such as "class" comes later. + :param sourceline: The line number where this tag was found in its + source document. + :param sourcepos: The character position within `sourceline` where this + tag was found. + :param namespaces: A dictionary of all namespace prefix mappings + currently in scope in the document. + + If this method returns None, the tag was rejected by an active + `ElementFilter`. You should proceed as if the tag had not occurred + in the document. For instance, if this was a self-closing tag, + don't call handle_endtag. + + :meta private: + """ + # print("Start tag %s: %s" % (name, attrs)) + self.endData() + + if ( + self.parse_only + and len(self.tagStack) <= 1 + and not self.parse_only.allow_tag_creation(nsprefix, name, attrs) + ): + return None + + tag_class = self.element_classes.get(Tag, Tag) + # Assume that this is either Tag or a subclass of Tag. If not, + # the user brought type-unsafety upon themselves. + tag_class = cast(Type[Tag], tag_class) + tag = tag_class( + self, + self.builder, + name, + namespace, + nsprefix, + attrs, + self.currentTag, + self._most_recent_element, + sourceline=sourceline, + sourcepos=sourcepos, + namespaces=namespaces, + ) + if tag is None: + return tag + if self._most_recent_element is not None: + self._most_recent_element.next_element = tag + self._most_recent_element = tag + self.pushTag(tag) + return tag + + def handle_endtag(self, name: str, nsprefix: Optional[str] = None) -> None: + """Called by the tree builder when an ending tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + + :meta private: + """ + # print("End tag: " + name) + self.endData() + self._popToTag(name, nsprefix) + + def handle_data(self, data: str) -> None: + """Called by the tree builder when a chunk of textual data is + encountered. + + :meta private: + """ + self.current_data.append(data) + + def decode( + self, + indent_level: Optional[int] = None, + eventual_encoding: _Encoding = DEFAULT_OUTPUT_ENCODING, + formatter: Union[Formatter, str] = "minimal", + iterator: Optional[Iterator[PageElement]] = None, + **kwargs: Any, + ) -> str: + """Returns a string representation of the parse tree + as a full HTML or XML document. + + :param indent_level: Each line of the rendering will be + indented this many levels. (The ``formatter`` decides what a + 'level' means, in terms of spaces or other characters + output.) This is used internally in recursive calls while + pretty-printing. + :param eventual_encoding: The encoding of the final document. 
If this is None, the document will be a Unicode string.
+        :param formatter: Either a `Formatter` object, or a string naming one of
+            the standard formatters.
+        :param iterator: The iterator to use when navigating over the
+            parse tree. This is only used by `Tag.decode_contents` and
+            you probably won't need to use it.
+        """
+        if self.is_xml:
+            # Print the XML declaration
+            encoding_part = ""
+            declared_encoding: Optional[str] = eventual_encoding
+            if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
+                # This is a special Python encoding; it can't actually
+                # go into an XML document because it means nothing
+                # outside of Python.
+                declared_encoding = None
+            if declared_encoding is not None:
+                encoding_part = ' encoding="%s"' % declared_encoding
+            prefix = '<?xml version="1.0"%s?>\n' % encoding_part
+        else:
+            prefix = ""
+
+        # Prior to 4.13.0, the first argument to this method was a
+        # bool called pretty_print, which gave the method a different
+        # signature from its superclass implementation, Tag.decode.
+        #
+        # The signatures of the two methods now match, but just in
+        # case someone is still passing a boolean in as the first
+        # argument to this method (or a keyword argument with the old
+        # name), we can handle it and put out a DeprecationWarning.
+        warning: Optional[str] = None
+        if isinstance(indent_level, bool):
+            if indent_level is True:
+                indent_level = 0
+            elif indent_level is False:
+                indent_level = None
+            warning = f"As of 4.13.0, the first argument to BeautifulSoup.decode has been changed from bool to int, to match Tag.decode. Pass in a value of {indent_level} instead."
+        else:
+            pretty_print = kwargs.pop("pretty_print", None)
+            assert not kwargs
+            if pretty_print is not None:
+                if pretty_print is True:
+                    indent_level = 0
+                elif pretty_print is False:
+                    indent_level = None
+                warning = f"As of 4.13.0, the pretty_print argument to BeautifulSoup.decode has been removed, to match Tag.decode. Pass in a value of indent_level={indent_level} instead."
+
+        if warning:
+            warnings.warn(warning, DeprecationWarning, stacklevel=2)
+        elif indent_level is False or pretty_print is False:
+            indent_level = None
+        return prefix + super(BeautifulSoup, self).decode(
+            indent_level, eventual_encoding, formatter, iterator
+        )
+
+
+# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
+_s = BeautifulSoup
+_soup = BeautifulSoup
+
+
+class BeautifulStoneSoup(BeautifulSoup):
+    """Deprecated interface to an XML parser."""
+
+    def __init__(self, *args: Any, **kwargs: Any):
+        kwargs["features"] = "xml"
+        warnings.warn(
+            "The BeautifulStoneSoup class was deprecated in version 4.0.0. Instead of using "
+            'it, pass features="xml" into the BeautifulSoup constructor.',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
+
+
+# If this file is run as a script, act as an HTML pretty-printer.
+if __name__ == "__main__":
+    import sys
+
+    soup = BeautifulSoup(sys.stdin)
+    print((soup.prettify()))
diff --git a/venv/lib/python3.12/site-packages/bs4/_deprecation.py b/venv/lib/python3.12/site-packages/bs4/_deprecation.py
new file mode 100644
index 0000000000..a0d7fdc0a0
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/_deprecation.py
@@ -0,0 +1,80 @@
+"""Helper functions for deprecation.
+
+This interface is itself unstable and may change without warning. Do
+not use these functions yourself, even as a joke. The underscores are
+there for a reason. No support will be given.
+ +In particular, most of this will go away without warning once +Beautiful Soup drops support for Python 3.11, since Python 3.12 +defines a `@typing.deprecated() +decorator. `_ +""" + +import functools +import warnings + +from typing import ( + Any, + Callable, +) + + +def _deprecated_alias(old_name: str, new_name: str, version: str): + """Alias one attribute name to another for backward compatibility + + :meta private: + """ + + @property + def alias(self) -> Any: + ":meta private:" + warnings.warn( + f"Access to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return getattr(self, new_name) + + @alias.setter + def alias(self, value: str) -> None: + ":meta private:" + warnings.warn( + f"Write to deprecated property {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return setattr(self, new_name, value) + + return alias + + +def _deprecated_function_alias( + old_name: str, new_name: str, version: str +) -> Callable[[Any], Any]: + def alias(self, *args: Any, **kwargs: Any) -> Any: + ":meta private:" + warnings.warn( + f"Call to deprecated method {old_name}. (Replaced by {new_name}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return getattr(self, new_name)(*args, **kwargs) + + return alias + + +def _deprecated(replaced_by: str, version: str) -> Callable: + def deprecate(func: Callable) -> Callable: + @functools.wraps(func) + def with_warning(*args: Any, **kwargs: Any) -> Any: + ":meta private:" + warnings.warn( + f"Call to deprecated method {func.__name__}. (Replaced by {replaced_by}) -- Deprecated since version {version}.", + DeprecationWarning, + stacklevel=2, + ) + return func(*args, **kwargs) + + return with_warning + + return deprecate diff --git a/venv/lib/python3.12/site-packages/bs4/_typing.py b/venv/lib/python3.12/site-packages/bs4/_typing.py new file mode 100644 index 0000000000..ac4ec3405f --- /dev/null +++ b/venv/lib/python3.12/site-packages/bs4/_typing.py @@ -0,0 +1,196 @@ +# Custom type aliases used throughout Beautiful Soup to improve readability. + +# Notes on improvements to the type system in newer versions of Python +# that can be used once Beautiful Soup drops support for older +# versions: +# +# * ClassVar can be put on class variables now. +# * In 3.10, x|y is an accepted shorthand for Union[x,y]. +# * In 3.10, TypeAlias gains capabilities that can be used to +# improve the tree matching types (I don't remember what, exactly). +# * In 3.9 it's possible to specialize the re.Match type, +# e.g. re.Match[str]. In 3.8 there's a typing.re namespace for this, +# but it's removed in 3.12, so to support the widest possible set of +# versions I'm not using it. + +from typing_extensions import ( + runtime_checkable, + Protocol, + TypeAlias, +) +from typing import ( + Any, + Callable, + Dict, + IO, + Iterable, + Mapping, + Optional, + Pattern, + TYPE_CHECKING, + Union, +) + +if TYPE_CHECKING: + from bs4.element import ( + AttributeValueList, + NamespacedAttribute, + NavigableString, + PageElement, + ResultSet, + Tag, + ) + + +@runtime_checkable +class _RegularExpressionProtocol(Protocol): + """A protocol object which can accept either Python's built-in + `re.Pattern` objects, or the similar ``Regex`` objects defined by the + third-party ``regex`` package. + """ + + def search( + self, string: str, pos: int = ..., endpos: int = ... + ) -> Optional[Any]: ... 
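+    # Because this protocol is @runtime_checkable, isinstance() checks
+    # only verify that members like search() and pattern exist; their
+    # signatures are not checked.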
+ + @property + def pattern(self) -> str: ... + + +# Aliases for markup in various stages of processing. +# +#: The rawest form of markup: either a string, bytestring, or an open filehandle. +_IncomingMarkup: TypeAlias = Union[str, bytes, IO[str], IO[bytes]] + +#: Markup that is in memory but has (potentially) yet to be converted +#: to Unicode. +_RawMarkup: TypeAlias = Union[str, bytes] + +# Aliases for character encodings +# + +#: A data encoding. +_Encoding: TypeAlias = str + +#: One or more data encodings. +_Encodings: TypeAlias = Iterable[_Encoding] + +# Aliases for XML namespaces +# + +#: The prefix for an XML namespace. +_NamespacePrefix: TypeAlias = str + +#: The URL of an XML namespace +_NamespaceURL: TypeAlias = str + +#: A mapping of prefixes to namespace URLs. +_NamespaceMapping: TypeAlias = Dict[_NamespacePrefix, _NamespaceURL] + +#: A mapping of namespace URLs to prefixes +_InvertedNamespaceMapping: TypeAlias = Dict[_NamespaceURL, _NamespacePrefix] + +# Aliases for the attribute values associated with HTML/XML tags. +# + +#: The value associated with an HTML or XML attribute. This is the +#: relatively unprocessed value Beautiful Soup expects to come from a +#: `TreeBuilder`. +_RawAttributeValue: TypeAlias = str + +#: A dictionary of names to `_RawAttributeValue` objects. This is how +#: Beautiful Soup expects a `TreeBuilder` to represent a tag's +#: attribute values. +_RawAttributeValues: TypeAlias = ( + "Mapping[Union[str, NamespacedAttribute], _RawAttributeValue]" +) + +#: An attribute value in its final form, as stored in the +# `Tag` class, after it has been processed and (in some cases) +# split into a list of strings. +_AttributeValue: TypeAlias = Union[str, "AttributeValueList"] + +#: A dictionary of names to :py:data:`_AttributeValue` objects. This is what +#: a tag's attributes look like after processing. +_AttributeValues: TypeAlias = Dict[str, _AttributeValue] + +#: The methods that deal with turning :py:data:`_RawAttributeValue` into +#: :py:data:`_AttributeValue` may be called several times, even after the values +#: are already processed (e.g. when cloning a tag), so they need to +#: be able to acommodate both possibilities. +_RawOrProcessedAttributeValues: TypeAlias = Union[_RawAttributeValues, _AttributeValues] + +#: A number of tree manipulation methods can take either a `PageElement` or a +#: normal Python string (which will be converted to a `NavigableString`). +_InsertableElement: TypeAlias = Union["PageElement", str] + +# Aliases to represent the many possibilities for matching bits of a +# parse tree. +# +# This is very complicated because we're applying a formal type system +# to some very DWIM code. The types we end up with will be the types +# of the arguments to the SoupStrainer constructor and (more +# familiarly to Beautiful Soup users) the find* methods. + +#: A function that takes a PageElement and returns a yes-or-no answer. +_PageElementMatchFunction: TypeAlias = Callable[["PageElement"], bool] + +#: A function that takes the raw parsed ingredients of a markup tag +#: and returns a yes-or-no answer. +# Not necessary at the moment. +# _AllowTagCreationFunction:TypeAlias = Callable[[Optional[str], str, Optional[_RawAttributeValues]], bool] + +#: A function that takes the raw parsed ingredients of a markup string node +#: and returns a yes-or-no answer. +# Not necessary at the moment. +# _AllowStringCreationFunction:TypeAlias = Callable[[Optional[str]], bool] + +#: A function that takes a `Tag` and returns a yes-or-no answer. 
+#: A `TagNameMatchRule` expects this kind of function, if you're +#: going to pass it a function. +_TagMatchFunction: TypeAlias = Callable[["Tag"], bool] + +#: A function that takes a single string and returns a yes-or-no +#: answer. An `AttributeValueMatchRule` expects this kind of function, if +#: you're going to pass it a function. So does a `StringMatchRule`. +_StringMatchFunction: TypeAlias = Callable[[str], bool] + +#: Either a tag name, an attribute value or a string can be matched +#: against a string, bytestring, regular expression, or a boolean. +_BaseStrainable: TypeAlias = Union[str, bytes, Pattern[str], bool] + +#: A tag can be matched either with the `_BaseStrainable` options, or +#: using a function that takes the `Tag` as its sole argument. +_BaseStrainableElement: TypeAlias = Union[_BaseStrainable, _TagMatchFunction] + +#: A tag's attribute vgalue can be matched either with the +#: `_BaseStrainable` options, or using a function that takes that +#: value as its sole argument. +_BaseStrainableAttribute: TypeAlias = Union[_BaseStrainable, _StringMatchFunction] + +#: A tag can be matched using either a single criterion or a list of +#: criteria. +_StrainableElement: TypeAlias = Union[ + _BaseStrainableElement, Iterable[_BaseStrainableElement] +] + +#: An attribute value can be matched using either a single criterion +#: or a list of criteria. +_StrainableAttribute: TypeAlias = Union[ + _BaseStrainableAttribute, Iterable[_BaseStrainableAttribute] +] + +#: An string can be matched using the same techniques as +#: an attribute value. +_StrainableString: TypeAlias = _StrainableAttribute + +#: A dictionary may be used to match against multiple attribute vlaues at once. +_StrainableAttributes: TypeAlias = Dict[str, _StrainableAttribute] + +#: Many Beautiful soup methods return a PageElement or an ResultSet of +#: PageElements. A PageElement is either a Tag or a NavigableString. +#: These convenience aliases make it easier for IDE users to see which methods +#: are available on the objects they're dealing with. +_OneElement: TypeAlias = Union["PageElement", "Tag", "NavigableString"] +_AtMostOneElement: TypeAlias = Optional[_OneElement] +_QueryResults: TypeAlias = "ResultSet[_OneElement]" diff --git a/venv/lib/python3.12/site-packages/bs4/_warnings.py b/venv/lib/python3.12/site-packages/bs4/_warnings.py new file mode 100644 index 0000000000..4309473045 --- /dev/null +++ b/venv/lib/python3.12/site-packages/bs4/_warnings.py @@ -0,0 +1,98 @@ +"""Define some custom warnings.""" + + +class GuessedAtParserWarning(UserWarning): + """The warning issued when BeautifulSoup has to guess what parser to + use -- probably because no parser was specified in the constructor. + """ + + MESSAGE: str = """No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system ("%(parser)s"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently. + +The code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features="%(parser)s"' to the BeautifulSoup constructor. +""" + + +class UnusualUsageWarning(UserWarning): + """A superclass for warnings issued when Beautiful Soup sees + something that is typically the result of a mistake in the calling + code, but might be intentional on the part of the user. 
If it is + in fact intentional, you can filter the individual warning class + to get rid of the warning. If you don't like Beautiful Soup + second-guessing what you are doing, you can filter the + UnusualUsageWarningclass itself and get rid of these entirely. + """ + + +class MarkupResemblesLocatorWarning(UnusualUsageWarning): + """The warning issued when BeautifulSoup is given 'markup' that + actually looks like a resource locator -- a URL or a path to a file + on disk. + """ + + #: :meta private: + GENERIC_MESSAGE: str = """ + +However, if you want to parse some data that happens to look like a %(what)s, then nothing has gone wrong: you are using Beautiful Soup correctly, and this warning is spurious and can be filtered. To make this warning go away, run this code before calling the BeautifulSoup constructor: + + from bs4 import MarkupResemblesLocatorWarning + import warnings + + warnings.filterwarnings("ignore", category=MarkupResemblesLocatorWarning) + """ + + URL_MESSAGE: str = ( + """The input passed in on this line looks more like a URL than HTML or XML. + +If you meant to use Beautiful Soup to parse the web page found at a certain URL, then something has gone wrong. You should use an Python package like 'requests' to fetch the content behind the URL. Once you have the content as a string, you can feed that string into Beautiful Soup.""" + + GENERIC_MESSAGE + ) + + FILENAME_MESSAGE: str = ( + """The input passed in on this line looks more like a filename than HTML or XML. + +If you meant to use Beautiful Soup to parse the contents of a file on disk, then something has gone wrong. You should open the file first, using code like this: + + filehandle = open(your filename) + +You can then feed the open filehandle into Beautiful Soup instead of using the filename.""" + + GENERIC_MESSAGE + ) + + +class AttributeResemblesVariableWarning(UnusualUsageWarning, SyntaxWarning): + """The warning issued when Beautiful Soup suspects a provided + attribute name may actually be the misspelled name of a Beautiful + Soup variable. Generally speaking, this is only used in cases like + "_class" where it's very unlikely the user would be referencing an + XML attribute with that name. + """ + + MESSAGE: str = """%(original)r is an unusual attribute name and is a common misspelling for %(autocorrect)r. + +If you meant %(autocorrect)r, change your code to use it, and this warning will go away. + +If you really did mean to check the %(original)r attribute, this warning is spurious and can be filtered. To make it go away, run this code before creating your BeautifulSoup object: + + from bs4 import AttributeResemblesVariableWarning + import warnings + + warnings.filterwarnings("ignore", category=AttributeResemblesVariableWarning) +""" + + +class XMLParsedAsHTMLWarning(UnusualUsageWarning): + """The warning issued when an HTML parser is used to parse + XML that is not (as far as we can tell) XHTML. + """ + + MESSAGE: str = """It looks like you're using an HTML parser to parse an XML document. + +Assuming this really is an XML document, what you're doing might work, but you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the Python package 'lxml' installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor. + +If you want or need to use an HTML parser on this document, you can make this warning go away by filtering it. 
To do that, run this code before calling the BeautifulSoup constructor: + + from bs4 import XMLParsedAsHTMLWarning + import warnings + + warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning) +""" diff --git a/venv/lib/python3.12/site-packages/bs4/builder/__init__.py b/venv/lib/python3.12/site-packages/bs4/builder/__init__.py new file mode 100644 index 0000000000..5f2b38de2f --- /dev/null +++ b/venv/lib/python3.12/site-packages/bs4/builder/__init__.py @@ -0,0 +1,848 @@ +from __future__ import annotations + +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +from collections import defaultdict +import re +from types import ModuleType +from typing import ( + Any, + cast, + Dict, + Iterable, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + TYPE_CHECKING, +) +import warnings +import sys +from bs4.element import ( + AttributeDict, + AttributeValueList, + CharsetMetaAttributeValue, + ContentMetaAttributeValue, + RubyParenthesisString, + RubyTextString, + Stylesheet, + Script, + TemplateString, + nonwhitespace_re, +) + +# Exceptions were moved to their own module in 4.13. Import here for +# backwards compatibility. +from bs4.exceptions import ParserRejectedMarkup + +from bs4._typing import ( + _AttributeValues, + _RawAttributeValue, +) + +from bs4._warnings import XMLParsedAsHTMLWarning + +if TYPE_CHECKING: + from bs4 import BeautifulSoup + from bs4.element import ( + NavigableString, + Tag, + ) + from bs4._typing import ( + _AttributeValue, + _Encoding, + _Encodings, + _RawOrProcessedAttributeValues, + _RawMarkup, + ) + +__all__ = [ + "HTMLTreeBuilder", + "SAXTreeBuilder", + "TreeBuilder", + "TreeBuilderRegistry", +] + +# Some useful features for a TreeBuilder to have. +FAST = "fast" +PERMISSIVE = "permissive" +STRICT = "strict" +XML = "xml" +HTML = "html" +HTML_5 = "html5" + +__all__ = [ + "TreeBuilderRegistry", + "TreeBuilder", + "HTMLTreeBuilder", + "DetectsXMLParsedAsHTML", + + "ParserRejectedMarkup", # backwards compatibility only as of 4.13.0 +] + +class TreeBuilderRegistry(object): + """A way of looking up TreeBuilder subclasses by their name or by desired + features. + """ + + builders_for_feature: Dict[str, List[Type[TreeBuilder]]] + builders: List[Type[TreeBuilder]] + + def __init__(self) -> None: + self.builders_for_feature = defaultdict(list) + self.builders = [] + + def register(self, treebuilder_class: type[TreeBuilder]) -> None: + """Register a treebuilder based on its advertised features. + + :param treebuilder_class: A subclass of `TreeBuilder`. its + `TreeBuilder.features` attribute should list its features. + """ + for feature in treebuilder_class.features: + self.builders_for_feature[feature].insert(0, treebuilder_class) + self.builders.insert(0, treebuilder_class) + + def lookup(self, *features: str) -> Optional[Type[TreeBuilder]]: + """Look up a TreeBuilder subclass with the desired features. + + :param features: A list of features to look for. If none are + provided, the most recently registered TreeBuilder subclass + will be used. + :return: A TreeBuilder subclass, or None if there's no + registered subclass with all the requested features. + """ + if len(self.builders) == 0: + # There are no builders at all. + return None + + if len(features) == 0: + # They didn't ask for any features. Give them the most + # recently registered builder. + return self.builders[0] + + # Go down the list of features in order, and eliminate any builders + # that don't match every feature. 
+ feature_list = list(features) + feature_list.reverse() + candidates = None + candidate_set = None + while len(feature_list) > 0: + feature = feature_list.pop() + we_have_the_feature = self.builders_for_feature.get(feature, []) + if len(we_have_the_feature) > 0: + if candidates is None: + candidates = we_have_the_feature + candidate_set = set(candidates) + else: + # Eliminate any candidates that don't have this feature. + candidate_set = candidate_set.intersection(set(we_have_the_feature)) + + # The only valid candidates are the ones in candidate_set. + # Go through the original list of candidates and pick the first one + # that's in candidate_set. + if candidate_set is None or candidates is None: + return None + for candidate in candidates: + if candidate in candidate_set: + return candidate + return None + + +#: The `BeautifulSoup` constructor will take a list of features +#: and use it to look up `TreeBuilder` classes in this registry. +builder_registry: TreeBuilderRegistry = TreeBuilderRegistry() + + +class TreeBuilder(object): + """Turn a textual document into a Beautiful Soup object tree. + + This is an abstract superclass which smooths out the behavior of + different parser libraries into a single, unified interface. + + :param multi_valued_attributes: If this is set to None, the + TreeBuilder will not turn any values for attributes like + 'class' into lists. Setting this to a dictionary will + customize this behavior; look at :py:attr:`bs4.builder.HTMLTreeBuilder.DEFAULT_CDATA_LIST_ATTRIBUTES` + for an example. + + Internally, these are called "CDATA list attributes", but that + probably doesn't make sense to an end-user, so the argument name + is ``multi_valued_attributes``. + + :param preserve_whitespace_tags: A set of tags to treat + the way
<pre> tags are treated in HTML. Tags in this set
+     are immune from pretty-printing; their contents will always be
+     output as-is.
+
+    :param string_containers: A dictionary mapping tag names to
+     the classes that should be instantiated to contain the textual
+     contents of those tags. The default is to use NavigableString
+     for every tag, no matter what the name. You can override the
+     default by changing :py:attr:`DEFAULT_STRING_CONTAINERS`.
+
+    :param store_line_numbers: If the parser keeps track of the line
+     numbers and positions of the original markup, that information
+     will, by default, be stored in each corresponding
+     :py:class:`bs4.element.Tag` object. You can turn this off by
+     passing store_line_numbers=False; then Tag.sourcepos and
+     Tag.sourceline will always be None. If the parser you're using
+     doesn't keep track of this information, then store_line_numbers
+     is irrelevant.
+
+    :param attribute_value_list_class: The value of a multi-valued attribute
+      (such as HTML's 'class') will be stored in an instance of this
+      class.  The default is Beautiful Soup's built-in
+      `AttributeValueList`, which is a normal Python list, and you
+      will probably never need to change it.
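+
+    A minimal usage sketch (``HTMLParserTreeBuilder`` is one concrete
+    subclass; any other works the same way)::
+
+        builder = HTMLParserTreeBuilder(multi_valued_attributes=None)
+        # 'class="foo bar"' now stays a single string instead of
+        # becoming ["foo", "bar"].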
+    """
+
+    USE_DEFAULT: Any = object()  #: :meta private:
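+    # The sentinel lets __init__ distinguish "argument not given" from an
+    # explicit None (e.g. multi_valued_attributes=None, which turns off
+    # multi-valued attribute handling altogether).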
+
+    def __init__(
+        self,
+        multi_valued_attributes: Dict[str, Set[str]] = USE_DEFAULT,
+        preserve_whitespace_tags: Set[str] = USE_DEFAULT,
+        store_line_numbers: bool = USE_DEFAULT,
+        string_containers: Dict[str, Type[NavigableString]] = USE_DEFAULT,
+        empty_element_tags: Set[str] = USE_DEFAULT,
+        attribute_dict_class: Type[AttributeDict] = AttributeDict,
+        attribute_value_list_class: Type[AttributeValueList] = AttributeValueList,
+    ):
+        self.soup = None
+        if multi_valued_attributes is self.USE_DEFAULT:
+            multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
+        self.cdata_list_attributes = multi_valued_attributes
+        if preserve_whitespace_tags is self.USE_DEFAULT:
+            preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
+        self.preserve_whitespace_tags = preserve_whitespace_tags
+        if empty_element_tags is self.USE_DEFAULT:
+            self.empty_element_tags = self.DEFAULT_EMPTY_ELEMENT_TAGS
+        else:
+            self.empty_element_tags = empty_element_tags
+        # TODO: store_line_numbers is probably irrelevant now that
+        # the behavior of sourceline and sourcepos has been made consistent
+        # everywhere.
+        if store_line_numbers == self.USE_DEFAULT:
+            store_line_numbers = self.TRACKS_LINE_NUMBERS
+        self.store_line_numbers = store_line_numbers
+        if string_containers == self.USE_DEFAULT:
+            string_containers = self.DEFAULT_STRING_CONTAINERS
+        self.string_containers = string_containers
+        self.attribute_dict_class = attribute_dict_class
+        self.attribute_value_list_class = attribute_value_list_class
+
+    NAME: str = "[Unknown tree builder]"
+    ALTERNATE_NAMES: Iterable[str] = []
+    features: Iterable[str] = []
+
+    is_xml: bool = False
+    picklable: bool = False
+
+    soup: Optional[BeautifulSoup]  #: :meta private:
+
+    #: A tag will be considered an empty-element
+    #: tag when and only when it has no contents.
+    empty_element_tags: Optional[Set[str]] = None  #: :meta private:
+    cdata_list_attributes: Dict[str, Set[str]]  #: :meta private:
+    preserve_whitespace_tags: Set[str]  #: :meta private:
+    string_containers: Dict[str, Type[NavigableString]]  #: :meta private:
+    tracks_line_numbers: bool  #: :meta private:
+
+    #: A value for these tag/attribute combinations is a space- or
+    #: comma-separated list of CDATA, rather than a single CDATA.
+    DEFAULT_CDATA_LIST_ATTRIBUTES: Dict[str, Set[str]] = defaultdict(set)
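+    # (HTMLTreeBuilder overrides this; its mapping includes entries along
+    # the lines of {"*": {"class", "accesskey"}, "a": {"rel", "rev"}}.)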
+
+    #: Whitespace should be preserved inside these tags.
+    DEFAULT_PRESERVE_WHITESPACE_TAGS: Set[str] = set()
+
+    #: The textual contents of tags with these names should be
+    #: instantiated with some class other than `bs4.element.NavigableString`.
+    DEFAULT_STRING_CONTAINERS: Dict[str, Type[bs4.element.NavigableString]] = {}
+
+    #: By default, tags are treated as empty-element tags if they have
+    #: no contents--that is, using XML rules. HTMLTreeBuilder
+    #: defines a different set of DEFAULT_EMPTY_ELEMENT_TAGS based on the
+    #: HTML 4 and HTML5 standards.
+    DEFAULT_EMPTY_ELEMENT_TAGS: Optional[Set[str]] = None
+
+    #: Most parsers don't keep track of line numbers.
+    TRACKS_LINE_NUMBERS: bool = False
+
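+    # A concrete builder overrides these class-level defaults. A minimal,
+    # purely hypothetical registration looks like:
+    #
+    #     class MyTreeBuilder(HTMLTreeBuilder):
+    #         NAME = "mybuilder"
+    #         features = [NAME, HTML, FAST]
+    #
+    #     builder_registry.register(MyTreeBuilder)
+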
+    def initialize_soup(self, soup: BeautifulSoup) -> None:
+        """The BeautifulSoup object has been initialized and is now
+        being associated with the TreeBuilder.
+
+        :param soup: A BeautifulSoup object.
+        """
+        self.soup = soup
+
+    def reset(self) -> None:
+        """Do any work necessary to reset the underlying parser
+        for a new document.
+
+        By default, this does nothing.
+        """
+        pass
+
+    def can_be_empty_element(self, tag_name: str) -> bool:
+        """Might a tag with this name be an empty-element tag?
+
+        The final markup may or may not actually present this tag as
+        self-closing.
+
+        For instance: an HTMLBuilder does not consider a <p> tag to be
+        an empty-element tag (it's not in
+        HTMLBuilder.empty_element_tags). This means an empty <p> tag
+        will be presented as "<p></p>", not "<p/>" or "<p>
". + + The default implementation has no opinion about which tags are + empty-element tags, so a tag will be presented as an + empty-element tag if and only if it has no children. + "" will become "", and "bar" will + be left alone. + + :param tag_name: The name of a markup tag. + """ + if self.empty_element_tags is None: + return True + return tag_name in self.empty_element_tags + + def feed(self, markup: _RawMarkup) -> None: + """Run incoming markup through some parsing process.""" + raise NotImplementedError() + + def prepare_markup( + self, + markup: _RawMarkup, + user_specified_encoding: Optional[_Encoding] = None, + document_declared_encoding: Optional[_Encoding] = None, + exclude_encodings: Optional[_Encodings] = None, + ) -> Iterable[Tuple[_RawMarkup, Optional[_Encoding], Optional[_Encoding], bool]]: + """Run any preliminary steps necessary to make incoming markup + acceptable to the parser. + + :param markup: The markup that's about to be parsed. + :param user_specified_encoding: The user asked to try this encoding + to convert the markup into a Unicode string. + :param document_declared_encoding: The markup itself claims to be + in this encoding. NOTE: This argument is not used by the + calling code and can probably be removed. + :param exclude_encodings: The user asked *not* to try any of + these encodings. + + :yield: A series of 4-tuples: (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy that the parser can try + to convert the document to Unicode and parse it. Each + strategy will be tried in turn. + + By default, the only strategy is to parse the markup + as-is. See `LXMLTreeBuilderForXML` and + `HTMLParserTreeBuilder` for implementations that take into + account the quirks of particular parsers. + + :meta private: + + """ + yield markup, None, None, False + + def test_fragment_to_document(self, fragment: str) -> str: + """Wrap an HTML fragment to make it look like a document. + + Different parsers do this differently. For instance, lxml + introduces an empty tag, and html5lib + doesn't. Abstracting this away lets us write simple tests + which run HTML fragments through the parser and compare the + results against other HTML fragments. + + This method should not be used outside of unit tests. + + :param fragment: A fragment of HTML. + :return: A full HTML document. + :meta private: + """ + return fragment + + def set_up_substitutions(self, tag: Tag) -> bool: + """Set up any substitutions that will need to be performed on + a `Tag` when it's output as a string. + + By default, this does nothing. See `HTMLTreeBuilder` for a + case where this is used. + + :return: Whether or not a substitution was performed. + :meta private: + """ + return False + + def _replace_cdata_list_attribute_values( + self, tag_name: str, attrs: _RawOrProcessedAttributeValues + ) -> _AttributeValues: + """When an attribute value is associated with a tag that can + have multiple values for that attribute, convert the string + value to a list of strings. + + Basically, replaces class="foo bar" with class=["foo", "bar"] + + NOTE: This method modifies its input in place. + + :param tag_name: The name of a tag. + :param attrs: A dictionary containing the tag's attributes. + Any appropriate attribute values will be modified in place. + :return: The modified dictionary that was originally passed in. + """ + + # First, cast the attrs dict to _AttributeValues. This might + # not be accurate yet, but it will be by the time this method + # returns. 
+ modified_attrs = cast(_AttributeValues, attrs) + if not modified_attrs or not self.cdata_list_attributes: + # Nothing to do. + return modified_attrs + + # There is at least a possibility that we need to modify one of + # the attribute values. + universal: Set[str] = self.cdata_list_attributes.get("*", set()) + tag_specific = self.cdata_list_attributes.get(tag_name.lower(), None) + for attr in list(modified_attrs.keys()): + modified_value: _AttributeValue + if attr in universal or (tag_specific and attr in tag_specific): + # We have a "class"-type attribute whose string + # value is a whitespace-separated list of + # values. Split it into a list. + original_value: _AttributeValue = modified_attrs[attr] + if isinstance(original_value, _RawAttributeValue): + # This is a _RawAttributeValue (a string) that + # needs to be split and converted to a + # AttributeValueList so it can be an + # _AttributeValue. + modified_value = self.attribute_value_list_class( + nonwhitespace_re.findall(original_value) + ) + else: + # html5lib calls setAttributes twice for the + # same tag when rearranging the parse tree. On + # the second call the attribute value here is + # already a list. This can also happen when a + # Tag object is cloned. If this happens, leave + # the value alone rather than trying to split + # it again. + modified_value = original_value + modified_attrs[attr] = modified_value + return modified_attrs + + +class SAXTreeBuilder(TreeBuilder): + """A Beautiful Soup treebuilder that listens for SAX events. + + This is not currently used for anything, and it will be removed + soon. It was a good idea, but it wasn't properly integrated into the + rest of Beautiful Soup, so there have been long stretches where it + hasn't worked properly. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + warnings.warn( + "The SAXTreeBuilder class was deprecated in 4.13.0 and will be removed soon thereafter. It is completely untested and probably doesn't work; do not use it.", + DeprecationWarning, + stacklevel=2, + ) + super(SAXTreeBuilder, self).__init__(*args, **kwargs) + + def feed(self, markup: _RawMarkup) -> None: + raise NotImplementedError() + + def close(self) -> None: + pass + + def startElement(self, name: str, attrs: Dict[str, str]) -> None: + attrs = AttributeDict((key[1], value) for key, value in list(attrs.items())) + # print("Start %s, %r" % (name, attrs)) + assert self.soup is not None + self.soup.handle_starttag(name, None, None, attrs) + + def endElement(self, name: str) -> None: + # print("End %s" % name) + assert self.soup is not None + self.soup.handle_endtag(name) + + def startElementNS( + self, nsTuple: Tuple[str, str], nodeName: str, attrs: Dict[str, str] + ) -> None: + # Throw away (ns, nodeName) for now. + self.startElement(nodeName, attrs) + + def endElementNS(self, nsTuple: Tuple[str, str], nodeName: str) -> None: + # Throw away (ns, nodeName) for now. + self.endElement(nodeName) + # handler.endElementNS((ns, node.nodeName), node.nodeName) + + def startPrefixMapping(self, prefix: str, nodeValue: str) -> None: + # Ignore the prefix for now. + pass + + def endPrefixMapping(self, prefix: str) -> None: + # Ignore the prefix for now. 
+ # handler.endPrefixMapping(prefix) + pass + + def characters(self, content: str) -> None: + assert self.soup is not None + self.soup.handle_data(content) + + def startDocument(self) -> None: + pass + + def endDocument(self) -> None: + pass + + +class HTMLTreeBuilder(TreeBuilder): + """This TreeBuilder knows facts about HTML, such as which tags are treated + specially by the HTML standard. + """ + + #: Some HTML tags are defined as having no contents. Beautiful Soup + #: treats these specially. + DEFAULT_EMPTY_ELEMENT_TAGS: Set[str] = set( + [ + # These are from HTML5. + "area", + "base", + "br", + "col", + "embed", + "hr", + "img", + "input", + "keygen", + "link", + "menuitem", + "meta", + "param", + "source", + "track", + "wbr", + # These are from earlier versions of HTML and are removed in HTML5. + "basefont", + "bgsound", + "command", + "frame", + "image", + "isindex", + "nextid", + "spacer", + ] + ) + + #: The HTML standard defines these tags as block-level elements. Beautiful + #: Soup does not treat these elements differently from other elements, + #: but it may do so eventually, and this information is available if + #: you need to use it. + DEFAULT_BLOCK_ELEMENTS: Set[str] = set( + [ + "address", + "article", + "aside", + "blockquote", + "canvas", + "dd", + "div", + "dl", + "dt", + "fieldset", + "figcaption", + "figure", + "footer", + "form", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6", + "header", + "hr", + "li", + "main", + "nav", + "noscript", + "ol", + "output", + "p", + "pre", + "section", + "table", + "tfoot", + "ul", + "video", + ] + ) + + #: These HTML tags need special treatment so they can be + #: represented by a string class other than `bs4.element.NavigableString`. + #: + #: For some of these tags, it's because the HTML standard defines + #: an unusual content model for them. I made this list by going + #: through the HTML spec + #: (https://html.spec.whatwg.org/#metadata-content) and looking for + #: "metadata content" elements that can contain strings. + #: + #: The Ruby tags ( and ) are here despite being normal + #: "phrasing content" tags, because the content they contain is + #: qualitatively different from other text in the document, and it + #: can be useful to be able to distinguish it. + #: + #: TODO: Arguably
+        doctype = "<!%s %s>" % (doctype_string, doctype_fragment)
+        markup = doctype + "\n<p>foo</p>"
+        soup = self.soup(markup)
+        return doctype.encode("utf8"), soup
+
+    def test_normal_doctypes(self):
+        """Make sure normal, everyday HTML doctypes are handled correctly."""
+        self.assertDoctypeHandled("html")
+        self.assertDoctypeHandled(
+            'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+        )
+
+    def test_empty_doctype(self):
+        soup = self.soup("<!DOCTYPE>")
+        doctype = soup.contents[0]
+        assert "" == doctype.strip()
+
+    def test_mixed_case_doctype(self):
+        # A lowercase or mixed-case doctype becomes a Doctype.
+        for doctype_fragment in ("doctype", "DocType"):
+            doctype_str, soup = self._document_with_doctype("html", doctype_fragment)
+
+            # Make sure a Doctype object was created and that the DOCTYPE
+            # is uppercase.
+            doctype = soup.contents[0]
+            assert doctype.__class__ == Doctype
+            assert doctype == "html"
+            assert soup.encode("utf8")[: len(doctype_str)] == b"<!DOCTYPE html>"
+
+            # Make sure that the doctype was correctly associated with the
+            # parse tree and that the rest of the document parsed.
+            assert soup.p.contents[0] == "foo"
+
+    def test_public_doctype_with_url(self):
+        doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
+        self.assertDoctypeHandled(doctype)
+
+    def test_system_doctype(self):
+        self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
+
+    def test_namespaced_system_doctype(self):
+        # We can handle a namespaced doctype with a system ID.
+        self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
+
+    def test_namespaced_public_doctype(self):
+        # Test a namespaced doctype with a public id.
+        self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
+
+    def test_real_xhtml_document(self):
+        """A real XHTML document should come out more or less the same as it went in."""
+        markup = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><title>Hello.</title></head>
+<body>Goodbye.</body>
+</html>"""
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+        assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"")
+
+        # No warning was issued about parsing an XML document as HTML,
+        # because XHTML is both.
+        assert w == []
+
+    def test_namespaced_html(self):
+        # When a namespaced XML document is parsed as HTML it should
+        # be treated as HTML with weird tag names.
+        markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>"""
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+
+        assert 2 == len(soup.find_all("ns1:foo"))
+
+        # n.b. no "you're parsing XML as HTML" warning was given
+        # because there was no XML declaration.
+        assert [] == w
+
+    def test_detect_xml_parsed_as_html(self):
+        # A warning is issued when parsing an XML document as HTML,
+        # but basic stuff should still work.
+        markup = b"""<?xml version="1.0" encoding="utf-8"?><tag>string</tag>"""
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+            assert soup.tag.string == "string"
+        [warning] = w
+        assert isinstance(warning.message, XMLParsedAsHTMLWarning)
+        assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE
+
+        # NOTE: the warning is not issued if the document appears to
+        # be XHTML (tested with test_real_xhtml_document in the
+        # superclass) or if there is no XML declaration (tested with
+        # test_namespaced_html in the superclass).
+
+    def test_processing_instruction(self):
+        # We test both Unicode and bytestring to verify that
+        # process_markup correctly sets processing_instruction_class
+        # even when the markup is already Unicode and there is no
+        # need to process anything.
+        markup = """<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.decode()
+
+        markup = b"""<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_deepcopy(self):
+        """Make sure you can copy the tree builder.
+
+        This is important because the builder is part of a
+        BeautifulSoup object, and we want to be able to copy that.
+        """
+        copy.deepcopy(self.default_builder)
+
+    def test_p_tag_is_never_empty_element(self):
+        """A <p> tag is never designated as an empty-element tag.
+
+        Even if the markup shows it as an empty-element tag, it
+        shouldn't be presented that way.
+        """
+        soup = self.soup("<p/>")
+        assert not soup.p.is_empty_element
+        assert str(soup.p) == "<p></p>"
+
+    def test_unclosed_tags_get_closed(self):
+        """A tag that's not closed by the end of the document should be closed.
+
+        This applies to all tags except empty-element tags.
+        """
+        self.assert_soup("<p>", "<p></p>")
+        self.assert_soup("<b>", "<b></b>")
+
+        self.assert_soup("<br>", "<br/>")
+
+    def test_br_is_always_empty_element_tag(self):
+        """A <br> tag is designated as an empty-element tag.
+
+        Some parsers treat <br></br> as one <br/> tag, some parsers as
+        two tags, but it should always be an empty-element tag.
+        """
+        soup = self.soup("<br></br>")
+        assert soup.br.is_empty_element
+        assert str(soup.br) == "<br/>"
+
+    def test_nested_formatting_elements(self):
+        self.assert_soup("<em><em></em></em>")
+
+    def test_double_head(self):
+        html = """<!DOCTYPE html>
+<html>
+<head>
+<title>Ordinary HEAD element test</title>
+</head>
+<script type="text/javascript">
+alert("Help!")
+</script>
+<body>
+Hello, world!
+</body>
+</html>
+"""
+        soup = self.soup(html)
+        assert "text/javascript" == soup.find("script")["type"]
+
+    def test_comment(self):
+        # Comments are represented as Comment objects.
+        markup = "<p>foo<!--foobar-->baz</p>"
+        self.assert_soup(markup)
+
+        soup = self.soup(markup)
+        comment = soup.find(string="foobar")
+        assert comment.__class__ == Comment
+
+        # The comment is properly integrated into the tree.
+        foo = soup.find(string="foo")
+        assert comment == foo.next_element
+        baz = soup.find(string="baz")
+        assert comment == baz.previous_element
+
+    def test_preserved_whitespace_in_pre_and_textarea(self):
+        """Whitespace must be preserved in <pre> and <textarea> tags,
+        even if that would mean not prettifying the markup.
+        """
+        pre_markup = "<pre>a   z</pre>\n"
+        textarea_markup = "<textarea> woo\nwoo  </textarea>\n"
+        self.assert_soup(pre_markup)
+        self.assert_soup(textarea_markup)
+
+        soup = self.soup(pre_markup)
+        assert soup.pre.prettify() == pre_markup
+
+        soup = self.soup(textarea_markup)
+        assert soup.textarea.prettify() == textarea_markup
+
+        soup = self.soup("")
+        assert soup.textarea.prettify() == "\n"
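The whitespace guarantee tested above is visible directly through prettify(); a small sketch (illustration only, not part of this diff; assumes bs4 is installed):

from bs4 import BeautifulSoup

# prettify() normally reflows markup, but the contents of <pre> and
# <textarea> must come back byte-for-byte identical.
soup = BeautifulSoup("<pre>a   z</pre>\n", "html.parser")
assert soup.pre.prettify() == "<pre>a   z</pre>\n"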
+
+    def test_nested_inline_elements(self):
+        """Inline elements can be nested indefinitely."""
+        b_tag = "Inside a B tag"
+        self.assert_soup(b_tag)
+
+        nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
+        self.assert_soup(nested_b_tag)
+
+        double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
+        self.assert_soup(double_nested_b_tag)
+
+    def test_nested_block_level_elements(self):
+        """Block elements can be nested."""
+        soup = self.soup("<blockquote><p><b>Foo</b></p></blockquote>")
+        blockquote = soup.blockquote
+        assert blockquote.p.b.string == "Foo"
+        assert blockquote.b.string == "Foo"
+
+    def test_correctly_nested_tables(self):
+        """One table can go inside another one."""
+        markup = (
+            '<table id="1">'
+            "<tr>"
+            "<td>Here's another table:"
+            '<table id="2">'
+            "<tr><td>foo</td></tr>"
+            "</table></td>"
+        )
+
+        self.assert_soup(
+            markup,
+            '<table id="1"><tr><td>Here\'s another table:'
+            '<table id="2"><tr><td>foo</td></tr></table>'
+            "</td></tr></table>",
+        )
+
+        self.assert_soup(
+            "<table><thead><tr><td>Foo</td></tr></thead>"
+            "<tbody><tr><td>Bar</td></tr></tbody>"
+            "<tfoot><tr><td>Baz</td></tr></tfoot></table>"
+        )
+
+    def test_multivalued_attribute_with_whitespace(self):
+        # Whitespace separating the values of a multi-valued attribute
+        # should be ignored.
+        markup = '<div class=" foo bar  "></a>'
+        soup = self.soup(markup)
+        assert ["foo", "bar"] == soup.div["class"]
+
+        # If you search by the literal name of the class it's like the whitespace
+        # wasn't there.
+        assert soup.div == soup.find("div", class_="foo bar")
+
+    def test_deeply_nested_multivalued_attribute(self):
+        # html5lib can set the attributes of the same tag many times
+        # as it rearranges the tree. This has caused problems with
+        # multivalued attributes.
+        markup = '<table><div><div class="css"></div></div></table>'
+        soup = self.soup(markup)
+        assert ["css"] == soup.div.div["class"]
+
+    def test_multivalued_attribute_on_html(self):
+        # html5lib uses a different API to set the attributes of the
+        # <html> tag. This has caused problems with multivalued
+        # attributes.
+        markup = '<html class="a b"></html>'
+        soup = self.soup(markup)
+        assert ["a", "b"] == soup.html["class"]
+
+    def test_angle_brackets_in_attribute_values_are_escaped(self):
+        self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
+
+    def test_strings_resembling_character_entity_references(self):
+        # "&T" and "&p" look like incomplete character entities, but they are
+        # not.
+        self.assert_soup(
+            "<p>&bull; AT&T is in the s&p 500</p>",
+            "<p>\u2022 AT&amp;T is in the s&amp;p 500</p>",
+        )
+
+    def test_apos_entity(self):
+        self.assert_soup(
+            "<p>Bob&apos;s Bar</p>",
+            "<p>Bob's Bar</p>",
+        )
+
+    def test_entities_in_foreign_document_encoding(self):
+        # &#147; and &#148; are invalid numeric entities referencing
+        # Windows-1252 characters. &#45; references a character common
+        # to Windows-1252 and Unicode, and &#9731; references a
+        # character only found in Unicode.
+        #
+        # All of these entities should be converted to Unicode
+        # characters.
+        markup = "<p>&#147;Hello&#148; &#45;&#9731;</p>"
+        soup = self.soup(markup)
+        assert "“Hello” -☃" == soup.p.string
+
+    def test_entities_in_attributes_converted_to_unicode(self):
+        expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
+        self.assert_soup('<p id="pi&#241;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#Xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&ntilde;ata"></p>', expect)
+
+    def test_entities_in_text_converted_to_unicode(self):
+        expect = "<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>"
+        self.assert_soup("<p>pi&#241;ata</p>", expect)
+        self.assert_soup("<p>pi&#xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&#Xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&ntilde;ata</p>", expect)
+
+    def test_quot_entity_converted_to_quotation_mark(self):
+        self.assert_soup(
+            "<p>I said &quot;good day!&quot;</p>", '<p>I said "good day!"</p>'
+        )
+
+    def test_out_of_range_entity(self):
+        expect = "\N{REPLACEMENT CHARACTER}"
+        self.assert_soup("&#10000000000000;", expect)
+        self.assert_soup("&#x10000000000000;", expect)
+        self.assert_soup("&#1000000000;", expect)
+
+    def test_multipart_strings(self):
+        "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
+        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
+        assert "p" == soup.h2.string.next_element.name
+        assert "p" == soup.p.name
+        self.assertConnectedness(soup)
+
+    def test_invalid_html_entity(self):
+        # The html.parser treebuilder can't distinguish between an
+        # invalid HTML entity with a semicolon and an invalid HTML
+        # entity with no semicolon (see its subclass for the tested
+        # behavior). But the other treebuilders can.
+        markup = "<p>a &nosuchentity b</p>"
+        soup = self.soup(markup)
+        assert "<p>a &amp;nosuchentity b</p>" == soup.p.decode()
+
+        markup = "<p>a &nosuchentity; b</p>"
+        soup = self.soup(markup)
+        assert "<p>a &amp;nosuchentity; b</p>" == soup.p.decode()
+
+    def test_head_tag_between_head_and_body(self):
+        "Prevent recurrence of a bug in the html5lib treebuilder."
+        content = """<html><head></head>
+  <link></link>
+  <body>foo</body>
+</html>
+"""
+        soup = self.soup(content)
+        assert soup.html.body is not None
+        self.assertConnectedness(soup)
+
+    def test_multiple_copies_of_a_tag(self):
+        "Prevent recurrence of a bug in the html5lib treebuilder."
+        content = """<!DOCTYPE html>
+<html>
+ <body>
+   <article id="a" >
+   <div><a href="1"></div>
+   <footer>
+     <a href="2"></a>
+   </footer>
+  </article>
+  </body>
+</html>
+"""
+        soup = self.soup(content)
+        self.assertConnectedness(soup.article)
+
+    def test_basic_namespaces(self):
+        """Parsers don't need to *understand* namespaces, but at the
+        very least they should not choke on namespaces or lose
+        data."""
+
+        markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
+        soup = self.soup(markup)
+        assert markup == soup.encode()
+        assert "http://www.w3.org/1999/xhtml" == soup.html["xmlns"]
+        assert "http://www.w3.org/1998/Math/MathML" == soup.html["xmlns:mathml"]
+        assert "http://www.w3.org/2000/svg" == soup.html["xmlns:svg"]
+
+    def test_multivalued_attribute_value_becomes_list(self):
+        markup = b'<a class="foo bar">'
+        soup = self.soup(markup)
+        assert ["foo", "bar"] == soup.a["class"]
+
+    #
+    # Generally speaking, tests below this point are more tests of
+    # Beautiful Soup than tests of the tree builders. But parsers are
+    # weird, so we run these tests separately for every tree builder
+    # to detect any differences between them.
+    #
+
+    def test_can_parse_unicode_document(self):
+        # A seemingly innocuous document... but it's in Unicode! And
+        # it contains characters that can't be represented in the
+        # encoding found in the declaration! The horror!
+        markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
+        soup = self.soup(markup)
+        assert "Sacr\xe9 bleu!" == soup.body.string
+
+    def test_soupstrainer(self):
+        """Parsers should be able to work with SoupStrainers."""
+        strainer = SoupStrainer("b")
+        soup = self.soup("A <b>bold</b> statement", parse_only=strainer)
+        assert soup.decode() == "<b>bold</b>"
+
+    def test_single_quote_attribute_values_become_double_quotes(self):
+        self.assert_soup("<foo attr='bar'></foo>", '<foo attr="bar"></foo>')
+
+    def test_attribute_values_with_nested_quotes_are_left_alone(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        self.assert_soup(text)
+
+    def test_attribute_values_with_double_nested_quotes_get_quoted(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        soup = self.soup(text)
+        soup.foo["attr"] = 'Brawls happen at "Bob\'s Bar"'
+        self.assert_soup(
+            soup.foo.decode(),
+            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""",
+        )
+
+    def test_ampersand_in_attribute_value_gets_escaped(self):
+        self.assert_soup(
+            '<this is="really messed up & stuff"></this>',
+            '<this is="really messed up &amp; stuff"></this>',
+        )
+
+        self.assert_soup(
+            '<a href="http://example.org?a=1&b=2;3">foo</a>',
+            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>',
+        )
+
+    def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
+        self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')
+
+    def test_entities_in_strings_converted_during_parsing(self):
+        # Both XML and HTML entities are converted to Unicode characters
+        # during parsing.
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = (
+            "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
+        )
+        self.assert_soup(text, expected)
+
+    def test_smart_quotes_converted_on_the_way_in(self):
+        # Microsoft smart quotes are converted to Unicode characters during
+        # parsing.
+        quote = b"<p>\x91Foo\x92</p>"
+        soup = self.soup(quote, from_encoding="windows-1252")
+        assert (
+            soup.p.string
+            == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"
+        )
+
+    def test_non_breaking_spaces_converted_on_the_way_in(self):
+        soup = self.soup("<a>&nbsp;&nbsp;</a>")
+        assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
+
+    def test_entities_converted_on_the_way_out(self):
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode(
+            "utf-8"
+        )
+        soup = self.soup(text)
+        assert soup.p.encode("utf-8") == expected
+
+    def test_real_iso_8859_document(self):
+        # Smoke test of interrelated functionality, using an
+        # easy-to-understand document.
+
+        # Here it is in Unicode. Note that it claims to be in ISO-8859-1.
+        unicode_html = '<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
+
+        # That's because we're going to encode it into ISO-8859-1,
+        # and use that to test.
+        iso_latin_html = unicode_html.encode("iso-8859-1")
+
+        # Parse the ISO-8859-1 HTML.
+        soup = self.soup(iso_latin_html)
+
+        # Encode it to UTF-8.
+        result = soup.encode("utf-8")
+
+        # What do we expect the result to look like? Well, it would
+        # look like unicode_html, except that the META tag would say
+        # UTF-8 instead of ISO-8859-1.
+        expected = unicode_html.replace("ISO-8859-1", "utf-8")
+
+        # And, of course, it would be in UTF-8, not Unicode.
+        expected = expected.encode("utf-8")
+
+        # Ta-da!
+        assert result == expected
+
+    def test_real_shift_jis_document(self):
+        # Smoke test to make sure the parser can handle a document in
+        # Shift-JIS encoding, without choking.
+        shift_jis_html = (
+            b"<html><head></head><body><pre>"
+            b"\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f"
+            b"\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c"
+            b"\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B"
+            b"
" + ) + unicode_html = shift_jis_html.decode("shift-jis") + soup = self.soup(unicode_html) + + # Make sure the parse tree is correctly encoded to various + # encodings. + assert soup.encode("utf-8") == unicode_html.encode("utf-8") + assert soup.encode("euc_jp") == unicode_html.encode("euc_jp") + + def test_real_hebrew_document(self): + # A real-world test to make sure we can convert ISO-8859-9 (a + # Hebrew encoding) to UTF-8. + hebrew_document = b"Hebrew (ISO 8859-8) in Visual Directionality

Hebrew (ISO 8859-8) in Visual Directionality

\xed\xe5\xec\xf9" + soup = self.soup(hebrew_document, from_encoding="iso8859-8") + # Some tree builders call it iso8859-8, others call it iso-8859-9. + # That's not a difference we really care about. + assert soup.original_encoding in ("iso8859-8", "iso-8859-8") + assert soup.encode("utf-8") == ( + hebrew_document.decode("iso8859-8").encode("utf-8") + ) + + def test_meta_tag_reflects_current_encoding(self): + # Here's the tag saying that a document is + # encoded in Shift-JIS. + meta_tag = ( + '' + ) + + # Here's a document incorporating that meta tag. + shift_jis_html = ( + "\n%s\n" + '' + "Shift-JIS markup goes here." + ) % meta_tag + soup = self.soup(shift_jis_html) + + # Parse the document, and the charset is seemingly unaffected. + parsed_meta = soup.find("meta", {"http-equiv": "Content-type"}) + content = parsed_meta["content"] + assert "text/html; charset=x-sjis" == content + + # But that value is actually a ContentMetaAttributeValue object. + assert isinstance(content, ContentMetaAttributeValue) + + # And it will take on a value that reflects its current + # encoding. + assert "text/html; charset=utf8" == content.substitute_encoding("utf8") + + # No matter how the tag is encoded, its charset attribute + # will always be accurate. + assert b"charset=utf8" in parsed_meta.encode("utf8") + assert b"charset=shift-jis" in parsed_meta.encode("shift-jis") + + # For the rest of the story, see TestSubstitutions in + # test_tree.py. + + def test_html5_style_meta_tag_reflects_current_encoding(self): + # Here's the tag saying that a document is + # encoded in Shift-JIS. + meta_tag = '' + + # Here's a document incorporating that meta tag. + shift_jis_html = ( + "\n%s\n" + '' + "Shift-JIS markup goes here." + ) % meta_tag + soup = self.soup(shift_jis_html) + + # Parse the document, and the charset is seemingly unaffected. + parsed_meta = soup.find("meta", id="encoding") + charset = parsed_meta["charset"] + assert "x-sjis" == charset + + # But that value is actually a CharsetMetaAttributeValue object. + assert isinstance(charset, CharsetMetaAttributeValue) + + # And it will take on a value that reflects its current + # encoding. + assert "utf8" == charset.substitute_encoding("utf8") + + # No matter how the tag is encoded, its charset attribute + # will always be accurate. + assert b'charset="utf8"' in parsed_meta.encode("utf8") + assert b'charset="shift-jis"' in parsed_meta.encode("shift-jis") + + def test_python_specific_encodings_not_used_in_charset(self): + # You can encode an HTML document using a Python-specific + # encoding, but that encoding won't be mentioned _inside_ the + # resulting document. Instead, the document will appear to + # have no encoding. + for markup in [ + b'' b'' + ]: + soup = self.soup(markup) + for encoding in PYTHON_SPECIFIC_ENCODINGS: + if encoding in ( + "idna", + "mbcs", + "oem", + "undefined", + "string_escape", + "string-escape", + ): + # For one reason or another, these will raise an + # exception if we actually try to use them, so don't + # bother. + continue + encoded = soup.encode(encoding) + assert b'meta charset=""' in encoded + assert encoding.encode("ascii") not in encoded + + def test_tag_with_no_attributes_can_have_attributes_added(self): + data = self.soup("text") + data.a["foo"] = "bar" + assert 'text' == data.a.decode() + + def test_closing_tag_with_no_opening_tag(self): + # Without BeautifulSoup.open_tag_counter, the tag will + # cause _popToTag to be called over and over again as we look + # for a tag that wasn't there. 
The result is that 'text2' + # will show up outside the body of the document. + soup = self.soup("

text1

text2
") + assert "

text1

text2
" == soup.body.decode() + + def test_worst_case(self): + """Test the worst case (currently) for linking issues.""" + + soup = self.soup(BAD_DOCUMENT) + self.linkage_validator(soup) + + +class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest): + def test_pickle_and_unpickle_identity(self): + # Pickling a tree, then unpickling it, yields a tree identical + # to the original. + tree = self.soup("foo") + dumped = pickle.dumps(tree, 2) + loaded = pickle.loads(dumped) + assert loaded.__class__ == BeautifulSoup + assert loaded.decode() == tree.decode() + + def test_docstring_generated(self): + soup = self.soup("") + assert soup.encode() == b'\n' + + def test_xml_declaration(self): + markup = b"""\n""" + soup = self.soup(markup) + assert markup == soup.encode("utf8") + + def test_python_specific_encodings_not_used_in_xml_declaration(self): + # You can encode an XML document using a Python-specific + # encoding, but that encoding won't be mentioned _inside_ the + # resulting document. + markup = b"""\n""" + soup = self.soup(markup) + for encoding in PYTHON_SPECIFIC_ENCODINGS: + if encoding in ( + "idna", + "mbcs", + "oem", + "undefined", + "string_escape", + "string-escape", + ): + # For one reason or another, these will raise an + # exception if we actually try to use them, so don't + # bother. + continue + encoded = soup.encode(encoding) + assert b'' in encoded + assert encoding.encode("ascii") not in encoded + + def test_processing_instruction(self): + markup = b"""\n""" + soup = self.soup(markup) + assert markup == soup.encode("utf8") + + def test_real_xhtml_document(self): + """A real XHTML document should come out *exactly* the same as it went in.""" + markup = b""" + + +Hello. +Goodbye. +""" + soup = self.soup(markup) + assert soup.encode("utf-8") == markup + + def test_nested_namespaces(self): + doc = b""" + + + + + +""" + soup = self.soup(doc) + assert doc == soup.encode() + + def test_formatter_processes_script_tag_for_xml_documents(self): + doc = """ + +""" + soup = BeautifulSoup(doc, "lxml-xml") + # lxml would have stripped this while parsing, but we can add + # it later. + soup.script.string = 'console.log("< < hey > > ");' + encoded = soup.encode() + assert b"< < hey > >" in encoded + + def test_can_parse_unicode_document(self): + markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) + assert "Sacr\xe9 bleu!" == soup.root.string + + def test_can_parse_unicode_document_begining_with_bom(self): + markup = '\N{BYTE ORDER MARK}Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) + assert "Sacr\xe9 bleu!" == soup.root.string + + def test_popping_namespaced_tag(self): + markup = 'b2012-07-02T20:33:42Zcd' + soup = self.soup(markup) + assert str(soup.rss) == markup + + def test_docstring_includes_correct_encoding(self): + soup = self.soup("") + assert ( + soup.encode("latin1") == b'\n' + ) + + def test_large_xml_document(self): + """A large XML document should come out the same as it went in.""" + markup = ( + b'\n' + + b"0" * (2**12) + + b"" + ) + soup = self.soup(markup) + assert soup.encode("utf-8") == markup + + def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): + self.assert_soup("

", "

") + self.assert_soup("

foo

") + + def test_namespaces_are_preserved(self): + markup = 'This tag is in the a namespaceThis tag is in the b namespace' + soup = self.soup(markup) + root = soup.root + assert "http://example.com/" == root["xmlns:a"] + assert "http://example.net/" == root["xmlns:b"] + + def test_closing_namespaced_tag(self): + markup = '

20010504

' + soup = self.soup(markup) + assert str(soup.p) == markup + + def test_namespaced_attributes(self): + markup = '' + soup = self.soup(markup) + assert str(soup.foo) == markup + + def test_namespaced_attributes_xml_namespace(self): + markup = 'bar' + soup = self.soup(markup) + assert str(soup.foo) == markup + + def test_find_by_prefixed_name(self): + doc = """ + + foo + bar + baz + +""" + soup = self.soup(doc) + + # There are three tags. + assert 3 == len(soup.find_all("tag")) + + # But two of them are ns1:tag and one of them is ns2:tag. + assert 2 == len(soup.find_all("ns1:tag")) + assert 1 == len(soup.find_all("ns2:tag")) + + assert 1, len(soup.find_all("ns2:tag", key="value")) + assert 3, len(soup.find_all(["ns1:tag", "ns2:tag"])) + + def test_copy_tag_preserves_namespace(self): + xml = """ +""" + + soup = self.soup(xml) + tag = soup.document + duplicate = copy.copy(tag) + + # The two tags have the same namespace prefix. + assert tag.prefix == duplicate.prefix + + def test_worst_case(self): + """Test the worst case (currently) for linking issues.""" + + soup = self.soup(BAD_DOCUMENT) + self.linkage_validator(soup) + + +class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): + """Smoke test for a tree builder that supports HTML5.""" + + def test_real_xhtml_document(self): + # Since XHTML is not HTML5, HTML5 parsers are not tested to handle + # XHTML documents in any particular way. + pass + + def test_html_tags_have_namespace(self): + markup = "" + soup = self.soup(markup) + assert "http://www.w3.org/1999/xhtml" == soup.a.namespace + + def test_svg_tags_have_namespace(self): + markup = "" + soup = self.soup(markup) + namespace = "http://www.w3.org/2000/svg" + assert namespace == soup.svg.namespace + assert namespace == soup.circle.namespace + + def test_mathml_tags_have_namespace(self): + markup = "5" + soup = self.soup(markup) + namespace = "http://www.w3.org/1998/Math/MathML" + assert namespace == soup.math.namespace + assert namespace == soup.msqrt.namespace + + def test_xml_declaration_becomes_comment(self): + markup = '' + soup = self.soup(markup) + assert isinstance(soup.contents[0], Comment) + assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?' + assert "html" == soup.contents[0].next_element.name diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase new file mode 100644 index 0000000000..4828f8a423 --- /dev/null +++ b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase @@ -0,0 +1 @@ +

\ No newline at end of file
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase
new file mode 100644
index 0000000000..8a585ce927
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase
new file mode 100644
index 0000000000..0fe66dd23f
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase
new file mode 100644
index 0000000000..fd411427d7
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase
new file mode 100644
index 0000000000..6248b2c54f
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase
@@ -0,0 +1 @@
+ >tet><
\ No newline at end of file
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase
new file mode 100644
index 0000000000..107da539fc
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase
new file mode 100644
index 0000000000..367106c76f
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase
@@ -0,0 +1,2 @@
+
+t
\ No newline at end of file
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase
new file mode 100644
index 0000000000..a823d557b4
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase
new file mode 100644
index 0000000000..65af44d8c1
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase
new file mode 100644
index 0000000000..5559adbb35
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase
new file mode 100644
index 0000000000..8857115540
Binary files /dev/null and b/venv/lib/python3.12/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase differ
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/test_builder.py b/venv/lib/python3.12/site-packages/bs4/tests/test_builder.py
new file mode 100644
index 0000000000..87d675870e
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/tests/test_builder.py
@@ -0,0 +1,28 @@
+import pytest
+from unittest.mock import patch
+from bs4.builder import DetectsXMLParsedAsHTML
+
+
+class TestDetectsXMLParsedAsHTML:
+    @pytest.mark.parametrize(
+        "markup,looks_like_xml",
+        [
+            ("No xml declaration", False),
+            ("<html>obviously HTML</html>", False),
+            ("<?xml ?><html>Actually XHTML</html>", False),
+            ("<?xml ?>< html>Tricky XHTML</html>", False),
+            ("<?xml ?><html/>", True),
+        ],
+    )
+    def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml):
+        # Test of our ability to guess at whether markup looks XML-ish
+        # _and_ not HTML-ish.
+        with patch("bs4.builder.DetectsXMLParsedAsHTML._warn") as mock:
+            for data in markup, markup.encode("utf8"):
+                result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(data)
+                assert result == looks_like_xml
+                if looks_like_xml:
+                    assert mock.called
+                else:
+                    assert not mock.called
+                mock.reset_mock()
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py b/venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py
new file mode 100644
index 0000000000..ad4b5a9ed5
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/tests/test_builder_registry.py
@@ -0,0 +1,139 @@
+"""Tests of the builder registry."""
+
+import pytest
+import warnings
+from typing import Type
+
+from bs4 import BeautifulSoup
+from bs4.builder import (
+    builder_registry as registry,
+    TreeBuilder,
+    TreeBuilderRegistry,
+)
+from bs4.builder._htmlparser import HTMLParserTreeBuilder
+
+from . import (
+    HTML5LIB_PRESENT,
+    LXML_PRESENT,
+)
+
+if HTML5LIB_PRESENT:
+    from bs4.builder._html5lib import HTML5TreeBuilder
+
+if LXML_PRESENT:
+    from bs4.builder._lxml import (
+        LXMLTreeBuilderForXML,
+        LXMLTreeBuilder,
+    )
+
+
+# TODO: Split out the lxml and html5lib tests into their own classes
+# and gate with pytest.mark.skipIf.
+class TestBuiltInRegistry(object):
+    """Test the built-in registry with the default builders registered."""
+
+    def test_combination(self):
+        assert registry.lookup("strict", "html") == HTMLParserTreeBuilder
+        if LXML_PRESENT:
+            assert registry.lookup("fast", "html") == LXMLTreeBuilder
+            assert registry.lookup("permissive", "xml") == LXMLTreeBuilderForXML
+        if HTML5LIB_PRESENT:
+            assert registry.lookup("html5lib", "html") == HTML5TreeBuilder
+
+    def test_lookup_by_markup_type(self):
+        if LXML_PRESENT:
+            assert registry.lookup("html") == LXMLTreeBuilder
+            assert registry.lookup("xml") == LXMLTreeBuilderForXML
+        else:
+            assert registry.lookup("xml") is None
+            if HTML5LIB_PRESENT:
+                assert registry.lookup("html") == HTML5TreeBuilder
+            else:
+                assert registry.lookup("html") == HTMLParserTreeBuilder
+
+    def test_named_library(self):
+        if LXML_PRESENT:
+            assert registry.lookup("lxml", "xml") == LXMLTreeBuilderForXML
+            assert registry.lookup("lxml", "html") == LXMLTreeBuilder
+        if HTML5LIB_PRESENT:
+            assert registry.lookup("html5lib") == HTML5TreeBuilder
+
+        assert registry.lookup("html.parser") == HTMLParserTreeBuilder
+
+    def test_beautifulsoup_constructor_does_lookup(self):
+        with warnings.catch_warnings(record=True):
+            # This will create a warning about not explicitly
+            # specifying a parser, but we'll ignore it.
+
+            # You can pass in a string.
+            BeautifulSoup("", features="html")
+            # Or a list of strings.
+            BeautifulSoup("", features=["html", "fast"])
+            pass
+
+        # You'll get an exception if BS can't find an appropriate
+        # builder.
+        with pytest.raises(ValueError):
+            BeautifulSoup("", features="no-such-feature")
+
+
+class TestRegistry(object):
+    """Test the TreeBuilderRegistry class in general."""
+
+    def setup_method(self):
+        self.registry = TreeBuilderRegistry()
+
+    def builder_for_features(self, *feature_list: str) -> Type[TreeBuilder]:
+        cls = type(
+            "Builder_" + "_".join(feature_list), (object,), {"features": feature_list}
+        )
+
+        self.registry.register(cls)
+        return cls
+
+    def test_register_with_no_features(self):
+        builder = self.builder_for_features()
+
+        # Since the builder advertises no features, you can't find it
+        # by looking up features.
+        assert self.registry.lookup("foo") is None
+
+        # But you can find it by doing a lookup with no features, if
+        # this happens to be the only registered builder.
+        assert self.registry.lookup() == builder
+
+    def test_register_with_features_makes_lookup_succeed(self):
+        builder = self.builder_for_features("foo", "bar")
+        assert self.registry.lookup("foo") is builder
+        assert self.registry.lookup("bar") is builder
+
+    def test_lookup_fails_when_no_builder_implements_feature(self):
+        assert self.registry.lookup("baz") is None
+
+    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
+        self.builder_for_features("foo")
+        builder2 = self.builder_for_features("bar")
+        assert self.registry.lookup() == builder2
+
+    def test_lookup_fails_when_no_tree_builders_registered(self):
+        assert self.registry.lookup() is None
+
+    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
+        self.builder_for_features("foo")
+        self.builder_for_features("bar")
+        has_both_early = self.builder_for_features("foo", "bar", "baz")
+        has_both_late = self.builder_for_features("foo", "bar", "quux")
+        self.builder_for_features("bar")
+        self.builder_for_features("foo")
+
+        # There are two builders featuring 'foo' and 'bar', but
+        # the one that also features 'quux' was registered later.
+        assert self.registry.lookup("foo", "bar") == has_both_late
+
+        # There is only one builder featuring 'foo', 'bar', and 'baz'.
+        assert self.registry.lookup("foo", "bar", "baz") == has_both_early
+
+    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
+        self.builder_for_features("foo", "bar")
+        self.builder_for_features("foo", "baz")
+        assert self.registry.lookup("bar", "baz") is None
diff --git a/venv/lib/python3.12/site-packages/bs4/tests/test_css.py b/venv/lib/python3.12/site-packages/bs4/tests/test_css.py
new file mode 100644
index 0000000000..b1c423793e
--- /dev/null
+++ b/venv/lib/python3.12/site-packages/bs4/tests/test_css.py
@@ -0,0 +1,536 @@
+import pytest
+import types
+
+from bs4 import (
+    BeautifulSoup,
+    ResultSet,
+)
+
+from typing import (
+    Any,
+    List,
+    Tuple,
+    Type,
+)
+
+from packaging.version import Version
+
+from . import (
+    SoupTest,
+    SOUP_SIEVE_PRESENT,
+)
+
+SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS: Type[Exception]
+if SOUP_SIEVE_PRESENT:
+    from soupsieve import __version__, SelectorSyntaxError
+
+    # Some behavior changed in soupsieve 2.6, which affects one of our
+    # tests. For the test to run under all versions of Python
+    # supported by Beautiful Soup (which includes versions of Python
+    # not supported by soupsieve 2.6) we need to check both behaviors.
+    SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS = SelectorSyntaxError
+    if Version(__version__) < Version("2.6"):
+        SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS = NotImplementedError
+
+
+@pytest.mark.skipif(not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed")
+class TestCSSSelectors(SoupTest):
+    """Test basic CSS selector functionality.
+
+    This functionality is implemented in soupsieve, which has a much
+    more comprehensive test suite, so this is basically an extra check
+    that soupsieve works as expected.
+    """
+
+    HTML = """
+<html>
+<head>
+<title>The title</title>
+<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
+</head>
+<body>
+<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
+<div id="main" class="fancy">
+<div id="inner">
+<h1 id="header1">An H1</h1>
+<p>Some text</p>
+<p class="onep" id="p1">Some more text</p>
+<h2 id="header2">An H2</h2>
+<p class="class1 class2 class3" id="pmulti">Another</p>
+<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
+<h2 id="header3">Another H2</h2>
+<a id="me" href="http://simonwillison.net/" rel="me">me</a>
+<span class="s1">
+<a href="#" id="s1a1">span1a1</a>
+<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
+<span class="span2">
+<a href="#" id="s2a1">span2a1</a>
+</span>
+<span class="span3"></span>
+<custom-dashed-tag class="dashed" id="dash2"/>
+<div data-tag="dashedvalue" id="data1"/>
+</span>
+</div>
+<x id="xid">
+<z id="zida"/>
+<z id="zidab"/>
+<z id="zidac"/>
+</x>
+<y id="yid">
+<z id="zidb"/>
+</y>
+<p lang="en" id="lang-en">English</p>
+<p lang="en-gb" id="lang-en-gb">English UK</p>
+<p lang="en-us" id="lang-en-us">English US</p>
+<p lang="fr" id="lang-fr">French</p>
+<div id="footer">
+</div>
+</body>
+</html>
+"""
+
+    def setup_method(self):
+        self._soup = BeautifulSoup(self.HTML, "html.parser")
+
+    def assert_css_selects(
+        self, selector: str, expected_ids: List[str], **kwargs: Any
+    ) -> None:
+        results = self._soup.select(selector, **kwargs)
+        assert isinstance(results, ResultSet)
+        el_ids = [el["id"] for el in results]
+        el_ids.sort()
+        expected_ids.sort()
+        assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
+            selector,
+            ", ".join(expected_ids),
+            ", ".join(el_ids),
+        )
+
+    assertSelect = assert_css_selects
+
+    def assert_css_select_multiple(self, *tests: Tuple[str, List[str]]):
+        for selector, expected_ids in tests:
+            self.assert_css_selects(selector, expected_ids)
+
+    def test_precompiled(self):
+        sel = self._soup.css.compile("div")
+
+        els = self._soup.select(sel)
+        assert len(els) == 4
+        for div in els:
+            assert div.name == "div"
+
+        el = self._soup.select_one(sel)
+        assert "main" == el["id"]
+
+    def test_one_tag_one(self):
+        els = self._soup.select("title")
+        assert len(els) == 1
+        assert els[0].name == "title"
+        assert els[0].contents == ["The title"]
+
+    def test_one_tag_many(self):
+        els = self._soup.select("div")
+        assert len(els) == 4
+        for div in els:
+            assert div.name == "div"
+
+        el = self._soup.select_one("div")
+        assert "main" == el["id"]
+
+    def test_select_one_returns_none_if_no_match(self):
+        match = self._soup.select_one("nonexistenttag")
+        assert None is match
+
+    def test_tag_in_tag_one(self):
+        self.assert_css_selects("div div", ["inner", "data1"])
+
+    def test_tag_in_tag_many(self):
+        for selector in ("html div", "html body div", "body div"):
+            self.assert_css_selects(selector, ["data1", "main", "inner", "footer"])
+
+    def test_limit(self):
+        self.assert_css_selects("html div", ["main"], limit=1)
+        self.assert_css_selects("html body div", ["inner", "main"], limit=2)
+        self.assert_css_selects(
+            "body div", ["data1", "main", "inner", "footer"], limit=10
+        )
+
+    def test_tag_no_match(self):
+        assert len(self._soup.select("del")) == 0
+
+    def test_invalid_tag(self):
+        with pytest.raises(SelectorSyntaxError):
+            self._soup.select("tag%t")
+
+    def test_select_dashed_tag_ids(self):
+        self.assert_css_selects("custom-dashed-tag", ["dash1", "dash2"])
+
+    def test_select_dashed_by_id(self):
+        dashed = self._soup.select('custom-dashed-tag[id="dash2"]')
+        assert dashed[0].name == "custom-dashed-tag"
+        assert dashed[0]["id"] == "dash2"
+
+    def test_dashed_tag_text(self):
+        assert self._soup.select("body > custom-dashed-tag")[0].text == "Hello there."
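The select()/select_one() calls exercised by this class are delegated to soupsieve; a minimal usage sketch (illustration only, not part of this diff; assumes bs4 and soupsieve are installed):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div id="main"><p class="onep">x</p></div>', "html.parser")
assert [el["id"] for el in soup.select("div#main")] == ["main"]
# class is a multi-valued attribute, so its value comes back as a list.
assert soup.select_one("p.onep")["class"] == ["onep"]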
+
+    def test_select_dashed_matches_find_all(self):
+        assert self._soup.select("custom-dashed-tag") == self._soup.find_all(
+            "custom-dashed-tag"
+        )
+
+    def test_header_tags(self):
+        self.assert_css_select_multiple(
+            ("h1", ["header1"]),
+            ("h2", ["header2", "header3"]),
+        )
+
+    def test_class_one(self):
+        for selector in (".onep", "p.onep", "html p.onep"):
+            els = self._soup.select(selector)
+            assert len(els) == 1
+            assert els[0].name == "p"
+            assert els[0]["class"] == ["onep"]
+
+    def test_class_mismatched_tag(self):
+        els = self._soup.select("div.onep")
+        assert len(els) == 0
+
+    def test_one_id(self):
+        for selector in ("div#inner", "#inner", "div div#inner"):
+            self.assert_css_selects(selector, ["inner"])
+
+    def test_bad_id(self):
+        els = self._soup.select("#doesnotexist")
+        assert len(els) == 0
+
+    def test_items_in_id(self):
+        els = self._soup.select("div#inner p")
+        assert len(els) == 3
+        for el in els:
+            assert el.name == "p"
+        assert els[1]["class"] == ["onep"]
+        assert not els[0].has_attr("class")
+
+    def test_a_bunch_of_emptys(self):
+        for selector in ("div#main del", "div#main div.oops", "div div#main"):
+            assert len(self._soup.select(selector)) == 0
+
+    def test_multi_class_support(self):
+        for selector in (
+            ".class1",
+            "p.class1",
+            ".class2",
+            "p.class2",
+            ".class3",
+            "p.class3",
+            "html p.class2",
+            "div#inner .class2",
+        ):
+            self.assert_css_selects(selector, ["pmulti"])
+
+    def test_multi_class_selection(self):
+        for selector in (".class1.class3", ".class3.class2", ".class1.class2.class3"):
+            self.assert_css_selects(selector, ["pmulti"])
+
+    def test_child_selector(self):
+        self.assert_css_selects(".s1 > a", ["s1a1", "s1a2"])
+        self.assert_css_selects(".s1 > a span", ["s1a2s1"])
+
+    def test_child_selector_id(self):
+        self.assert_css_selects(".s1 > a#s1a2 span", ["s1a2s1"])
+
+    def test_attribute_equals(self):
+        self.assert_css_select_multiple(
+            ('p[class="onep"]', ["p1"]),
+            ('p[id="p1"]', ["p1"]),
+            ('[class="onep"]', ["p1"]),
+            ('[id="p1"]', ["p1"]),
+            ('link[rel="stylesheet"]', ["l1"]),
+            ('link[type="text/css"]', ["l1"]),
+            ('link[href="blah.css"]', ["l1"]),
+            ('link[href="no-blah.css"]', []),
+            ('[rel="stylesheet"]', ["l1"]),
+            ('[type="text/css"]', ["l1"]),
+            ('[href="blah.css"]', ["l1"]),
+            ('[href="no-blah.css"]', []),
+            ('p[href="no-blah.css"]', []),
+            ('[href="no-blah.css"]', []),
+        )
+
+    def test_attribute_tilde(self):
+        self.assert_css_select_multiple(
+            ('p[class~="class1"]', ["pmulti"]),
+            ('p[class~="class2"]', ["pmulti"]),
+            ('p[class~="class3"]', ["pmulti"]),
+            ('[class~="class1"]', ["pmulti"]),
+            ('[class~="class2"]', ["pmulti"]),
+            ('[class~="class3"]', ["pmulti"]),
+            ('a[rel~="friend"]', ["bob"]),
+            ('a[rel~="met"]', ["bob"]),
+            ('[rel~="friend"]', ["bob"]),
+            ('[rel~="met"]', ["bob"]),
+        )
+
+    def test_attribute_startswith(self):
+        self.assert_css_select_multiple(
+            ('[rel^="style"]', ["l1"]),
+            ('link[rel^="style"]', ["l1"]),
+            ('notlink[rel^="notstyle"]', []),
+            ('[rel^="notstyle"]', []),
+            ('link[rel^="notstyle"]', []),
+            ('link[href^="bla"]', ["l1"]),
+            ('a[href^="http://"]', ["bob", "me"]),
+            ('[href^="http://"]', ["bob", "me"]),
+            ('[id^="p"]', ["pmulti", "p1"]),
+            ('[id^="m"]', ["me", "main"]),
+            ('div[id^="m"]', ["main"]),
+            ('a[id^="m"]', ["me"]),
+            ('div[data-tag^="dashed"]', ["data1"]),
+        )
+
+    def test_attribute_endswith(self):
+        self.assert_css_select_multiple(
+            ('[href$=".css"]', ["l1"]),
+            ('link[href$=".css"]', ["l1"]),
+            ('link[id$="1"]', ["l1"]),
+            (
+                '[id$="1"]',
+                ["data1", "l1", "p1", "header1", "s1a1", "s2a1", "s1a2s1", "dash1"],
+            ),
+            ('div[id$="1"]', ["data1"]),
+            ('[id$="noending"]', []),
+        )
+
+    def test_attribute_contains(self):
+        self.assert_css_select_multiple(
+            # From test_attribute_startswith
+            ('[rel*="style"]', ["l1"]),
+            ('link[rel*="style"]', ["l1"]),
+            ('notlink[rel*="notstyle"]', []),
+            ('[rel*="notstyle"]', []),
+            ('link[rel*="notstyle"]', []),
+            ('link[href*="bla"]', ["l1"]),
+            ('[href*="http://"]', ["bob", "me"]),
+            ('[id*="p"]', ["pmulti", "p1"]),
+            ('div[id*="m"]', ["main"]),
+            ('a[id*="m"]', ["me"]),
+            # From test_attribute_endswith
+            ('[href*=".css"]', ["l1"]),
+            ('link[href*=".css"]', ["l1"]),
+            ('link[id*="1"]', ["l1"]),
+            (
+                '[id*="1"]',
+                [
+                    "data1",
+                    "l1",
+                    "p1",
+                    "header1",
+                    "s1a1",
+                    "s1a2",
+                    "s2a1",
+                    "s1a2s1",
+                    "dash1",
+                ],
+            ),
+            ('div[id*="1"]', ["data1"]),
+            ('[id*="noending"]', []),
+            # New for this test
+            ('[href*="."]', ["bob", "me", "l1"]),
+            ('a[href*="."]', ["bob", "me"]),
+            ('link[href*="."]', ["l1"]),
+            ('div[id*="n"]', ["main", "inner"]),
+            ('div[id*="nn"]', ["inner"]),
+            ('div[data-tag*="edval"]', ["data1"]),
+        )
+
+    def test_attribute_exact_or_hypen(self):
+        self.assert_css_select_multiple(
+            ('p[lang|="en"]', ["lang-en", "lang-en-gb", "lang-en-us"]),
+            ('[lang|="en"]', ["lang-en", "lang-en-gb", "lang-en-us"]),
+            ('p[lang|="fr"]', ["lang-fr"]),
+            ('p[lang|="gb"]', []),
+        )
+
+    def test_attribute_exists(self):
+        self.assert_css_select_multiple(
+            ("[rel]", ["l1", "bob", "me"]),
+            ("link[rel]", ["l1"]),
+            ("a[rel]", ["bob", "me"]),
+            ("[lang]", ["lang-en", "lang-en-gb", "lang-en-us", "lang-fr"]),
+            ("p[class]", ["p1", "pmulti"]),
+            ("[blah]", []),
+            ("p[blah]", []),
+            ("div[data-tag]", ["data1"]),
+        )
+
+    def test_quoted_space_in_selector_name(self):
+        html = """<div style="display: wrong">nope</div>
+<div style="display: right">yes</div>
+"""
+        soup = BeautifulSoup(html, "html.parser")
+        [chosen] = soup.select('div[style="display: right"]')
+        assert "yes" == chosen.string
+
+    def test_unsupported_pseudoclass(self):
+        with pytest.raises(SOUPSIEVE_EXCEPTION_ON_UNSUPPORTED_PSEUDOCLASS):
+            self._soup.select("a:no-such-pseudoclass")
+
+        with pytest.raises(SelectorSyntaxError):
+            self._soup.select("a:nth-of-type(a)")
+
+    def test_nth_of_type(self):
+        # Try to select first paragraph
+        els = self._soup.select("div#inner p:nth-of-type(1)")
+        assert len(els) == 1
+        assert els[0].string == "Some text"
+
+        # Try to select third paragraph
+        els = self._soup.select("div#inner p:nth-of-type(3)")
+        assert len(els) == 1
+        assert els[0].string == "Another"
+
+        # Try to select (non-existent!) fourth paragraph
+        els = self._soup.select("div#inner p:nth-of-type(4)")
+        assert len(els) == 0
+
+        # Zero will select no tags.
+        els = self._soup.select("div p:nth-of-type(0)")
+        assert len(els) == 0
+
+    def test_nth_of_type_direct_descendant(self):
+        els = self._soup.select("div#inner > p:nth-of-type(1)")
+        assert len(els) == 1
+        assert els[0].string == "Some text"
+
+    def test_id_child_selector_nth_of_type(self):
+        self.assert_css_selects("#inner > p:nth-of-type(2)", ["p1"])
+
+    def test_select_on_element(self):
+        # Other tests operate on the tree; this operates on an element
+        # within the tree.
+        inner = self._soup.find("div", id="main")
+        selected = inner.select("div")
+        # The <div id="inner"> tag was selected. The