diff --git a/.github/workflows/root-634.yml b/.github/workflows/root-634.yml
new file mode 100644
index 0000000000000..50cff9a24bd0e
--- /dev/null
+++ b/.github/workflows/root-634.yml
@@ -0,0 +1,34 @@
+
+name: 'ROOT 6.34'
+
+on:
+ schedule:
+ - cron: '01 1 * * *'
+
+ workflow_dispatch:
+ inputs:
+ incremental:
+ description: 'Do incremental build'
+ type: boolean
+ required: true
+ default: true
+ binaries:
+ description: Create binary packages and upload them as artifacts
+ type: boolean
+ required: true
+ default: false
+ buildtype:
+ description: The CMAKE_BUILD_TYPE to use for non-Windows.
+ type: choice
+ options:
+ - Debug
+ - RelWithDebInfo
+ - Release
+ - MinSizeRel
+ default: Debug
+ required: true
+
+jobs:
+ run_nightlies:
+ uses: root-project/root/.github/workflows/root-ci.yml@v6-34-00-patches
+ secrets: inherit
diff --git a/.github/workflows/root-ci-config/buildconfig/fedora41.txt b/.github/workflows/root-ci-config/buildconfig/fedora41.txt
new file mode 100644
index 0000000000000..cb1da137b6b97
--- /dev/null
+++ b/.github/workflows/root-ci-config/buildconfig/fedora41.txt
@@ -0,0 +1,5 @@
+builtin_zstd=ON
+builtin_zlib=ON
+builtin_nlohmannjson=ON
+builtin_vdt=ON
+pythia8=OFF
diff --git a/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt b/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt
new file mode 100644
index 0000000000000..58d56a8a0b9c1
--- /dev/null
+++ b/.github/workflows/root-ci-config/buildconfig/ubuntu2410.txt
@@ -0,0 +1,2 @@
+pythia8=OFF
+tmva-cpu=OFF
diff --git a/.github/workflows/root-ci.yml b/.github/workflows/root-ci.yml
index 551a4dcd6b60a..3b75040d98a4e 100644
--- a/.github/workflows/root-ci.yml
+++ b/.github/workflows/root-ci.yml
@@ -20,13 +20,13 @@ on:
inputs:
head_ref:
type: string
- default: master
+ default: v6-34-00-patches
base_ref:
type: string
- default: master
+ default: v6-34-00-patches
ref_name:
type: string
- default: master
+ default: v6-34-00-patches
# Enables manual start of workflow
workflow_dispatch:
@@ -153,6 +153,9 @@ jobs:
with:
build-directory: /Users/sftnight/ROOT-CI/src/
+ - name: Set up curl CA bundle for Davix to work with https
+ run: 'echo SSL_CERT_FILE=/opt/local/share/curl/curl-ca-bundle.crt >> $GITHUB_ENV'
+
- name: Pull Request Build
if: github.event_name == 'pull_request'
env:
@@ -358,6 +361,8 @@ jobs:
include:
- image: fedora40
overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_CXX_STANDARD=20"]
+ - image: fedora41
+ overrides: ["LLVM_ENABLE_ASSERTIONS=On"]
- image: alma8
overrides: ["LLVM_ENABLE_ASSERTIONS=On"]
- image: alma9
@@ -368,6 +373,8 @@ jobs:
overrides: ["imt=Off", "LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"]
- image: ubuntu2404
overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"]
+ - image: ubuntu2410
+ overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_BUILD_TYPE=Debug"]
- image: debian125
overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_CXX_STANDARD=20"]
# Special builds
@@ -388,10 +395,11 @@ jobs:
is_special: true
property: clang
overrides: ["LLVM_ENABLE_ASSERTIONS=On", "CMAKE_C_COMPILER=clang", "CMAKE_CXX_COMPILER=clang++"]
- - image: ubuntu2404-cuda
- is_special: true
- property: gpu
- extra-runs-on: gpu
+ # Disable until the DNS issues are understood
+ # - image: ubuntu2404-cuda
+ # is_special: true
+ # property: gpu
+ # extra-runs-on: gpu
runs-on:
- self-hosted
diff --git a/.github/workflows/root-docs-634.yml b/.github/workflows/root-docs-634.yml
new file mode 100644
index 0000000000000..ad30f934a1ded
--- /dev/null
+++ b/.github/workflows/root-docs-634.yml
@@ -0,0 +1,25 @@
+
+name: 'ROOT Docs 6.34'
+
+on:
+ schedule:
+ - cron: '0 1 * * *'
+ - cron: '0 12 * * *'
+
+ workflow_dispatch:
+ inputs:
+ incremental:
+ description: 'Do incremental build'
+ type: boolean
+ required: true
+ default: true
+ # docu_input: # opportunity: overwrite makeinput.sh with these args
+ # description: Folders to build documentation for. All folders are built if empty.
+ # type: string
+ # default: ""
+ # required: false
+
+jobs:
+ run_nightlies:
+ uses: root-project/root/.github/workflows/root-docs-ci.yml@v6-34-00-patches
+ secrets: inherit
diff --git a/.github/workflows/root-docs-ci.yml b/.github/workflows/root-docs-ci.yml
new file mode 100644
index 0000000000000..4049b89c6ee78
--- /dev/null
+++ b/.github/workflows/root-docs-ci.yml
@@ -0,0 +1,181 @@
+name: 'ROOT Docs CI'
+
+on:
+
+ # Allows nightly builds to trigger one run for each branch easily, by
+ # providing the relevant branch as "default" value here:
+ workflow_call:
+ inputs:
+ incremental:
+ type: boolean
+ default: true
+
+ workflow_dispatch:
+ inputs:
+ incremental:
+ description: 'Do incremental build'
+ type: boolean
+ required: true
+ default: true
+ # docu_input: # opportunity: overwrite makeinput.sh with these args
+ # description: Folders to build documentation for. All folders are built if empty.
+ # type: string
+ # default: ""
+ # required: false
+
+jobs:
+ build-docs:
+ if: github.repository_owner == 'root-project'
+
+ runs-on:
+ - self-hosted
+ - linux
+ - x64
+
+ env:
+ PLATFORM: alma9
+ DOC_DIR: v6-34-00-patches
+ DOC_LOCATION: /github/home
+ BASE_REF: v6-34-00-patches
+ WEB_DIR_NAME: v634
+ TAR_NAME: html634.tar
+
+ permissions:
+ contents: read
+
+ container:
+ image: registry.cern.ch/root-ci/alma9:buildready # ALSO UPDATE BELOW!
+ options: '--security-opt label=disable --rm --name rootdoc' # ALSO UPDATE BELOW!
+ env:
+ OS_APPLICATION_CREDENTIAL_ID: '7f5b64a265244623a3a933308569bdba'
+ OS_APPLICATION_CREDENTIAL_SECRET: ${{ secrets.OS_APPLICATION_CREDENTIAL_SECRET }}
+ OS_AUTH_TYPE: 'v3applicationcredential'
+ OS_AUTH_URL: 'https://keystone.cern.ch/v3'
+ OS_IDENTITY_API_VERSION: 3
+ OS_INTERFACE: 'public'
+ OS_REGION_NAME: 'cern'
+ PYTHONUNBUFFERED: true
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python Virtual Env
+ # if the `if` expr is false, `if` still has exit code 0.
+ # if the `if` block is entered, the block's exit code becomes the exit
+ # code of the `if`.
+ run: 'if [ -d /py-venv/ROOT-CI/bin/ ]; then . /py-venv/ROOT-CI/bin/activate && echo PATH=$PATH >> $GITHUB_ENV; fi'
+
+ - name: Set up directory name and tar filenames
+ run: |
+ echo TAR_NAME=html${BASE_REF}.tar >> $GITHUB_ENV
+ echo DOCDIR_NAME=${BASE_REF} >> $GITHUB_ENV
+
+ # TODO: install latest versions in image on root-ci-images
+ - name: Install Doxygen 1.10.0
+ run : |
+ mkdir -p ${{ github.workspace }}/doxygen
+ curl -L https://github.com/doxygen/doxygen/releases/download/Release_1_10_0/doxygen-1.10.0.linux.bin.tar.gz | tar -xz -C ${{ github.workspace }}/doxygen/ --strip-components=1
+ echo PATH=$PATH:${{ github.workspace }}/doxygen/bin >> $GITHUB_ENV
+
+ - name: Install qhelpgenerator-qt5
+ run: |
+ dnf update -y
+ dnf upgrade -y
+ dnf install -y qt5-doctools
+ which qhelpgenerator-qt5
+
+ - name: Apply option overrides
+ env:
+ OVERRIDES: "testing=Off roottest=Off minimal=On"
+ CONFIGFILE: '.github/workflows/root-ci-config/buildconfig/alma9.txt'
+ shell: bash
+ run: |
+ set -x
+ echo '' >> "$CONFIGFILE"
+ for ENTRY in $OVERRIDES; do
+ KEY=$( echo "$ENTRY" | cut -d '=' -f 1 )
+ # Add entry to file if not exists, otherwise replace
+ if grep -q "$KEY=" "$CONFIGFILE"; then
+ sed -i "s/$KEY=.*\$/$ENTRY/" "$CONFIGFILE"
+ else
+ echo "$ENTRY" >> "$CONFIGFILE"
+ fi
+ done
+ cat "$CONFIGFILE" || true
+
+ - name: Build ROOT - Workflow Dispatch
+ if: github.event_name == 'workflow_dispatch'
+ run: ".github/workflows/root-ci-config/build_root.py
+ --buildtype Release
+ --platform ${{ env.PLATFORM }}
+ --incremental false
+ --base_ref ${BASE_REF}
+ --head_ref ${BASE_REF}
+ --binaries false
+ --repository ${{ github.server_url }}/${{ github.repository }}"
+
+ - name: Build ROOT - Schedule
+ if: github.event_name == 'schedule'
+ run: ".github/workflows/root-ci-config/build_root.py
+ --buildtype Release
+ --platform ${{ env.PLATFORM }}
+ --incremental false
+ --base_ref ${BASE_REF}
+ --head_ref ${BASE_REF}
+ --binaries false
+ --repository ${{ github.server_url }}/${{ github.repository }}"
+
+ - name: Run Doxygen
+ working-directory: ${{ env.DOC_LOCATION }}
+ shell: bash
+ run: |
+ source ROOT-CI/build/bin/thisroot.sh
+ export DOXYGEN_OUTPUT_DIRECTORY=/github/home/${DOC_DIR}
+ cd ROOT-CI/src/documentation/doxygen
+ make -j `nproc --all`
+
+ - name: Create documentation archives
+ working-directory: ${{ env.DOC_LOCATION }}
+ shell: bash
+ run: |
+ pwd
+ ls -l
+ echo ${DOC_DIR}
+ echo ${TAR_NAME}
+ ls -l ${DOC_DIR}
+ tar cf ${TAR_NAME} ${DOC_DIR}
+ gzip ${TAR_NAME}
+ ls -l
+
+ #Upload to GitHub as an artifact
+ - name: Upload tar file for GH
+ if: ${{ !cancelled() }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{env.TAR_NAME}}.gz
+ path: ${{env.DOC_LOCATION}}/${{env.TAR_NAME}}.gz
+ if-no-files-found: error
+
+ - name: Install AWS CLI
+ run: |
+ python -m pip install --upgrade pip
+ pip install awscli==1.36.40
+ aws configure set default.s3.max_concurrent_requests 128
+
+ - name: Sync documentation to S3
+ working-directory: ${{ env.DOC_LOCATION }}
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_ENDPOINT_URL: https://s3.cern.ch/
+
+ run: |
+ pwd
+ ls -l
+ aws s3 sync ${DOC_DIR}/html/ s3://root/doc/${WEB_DIR_NAME}/
+ rm -rf ${DOC_DIR}/html
+ aws s3 sync ${DOC_DIR}/ s3://root/doc/${WEB_DIR_NAME}/
+ aws s3 cp ${TAR_NAME}.gz s3://root/download/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0b769aa555cb9..be930aea2a798 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -125,7 +125,7 @@ function(relatedrepo_GetClosestMatch)
# Otherwise, try to use a branch that matches `current_head` in the fork repository
execute_process(COMMAND ${GIT_EXECUTABLE} ls-remote --heads --tags
- ${__ORIGIN_PREFIX}/${__REPO_NAME} ${current_head} OUTPUT_VARIABLE matching_refs)
+ ${__ORIGIN_PREFIX}/${__REPO_NAME} ${current_head} OUTPUT_VARIABLE matching_refs ERROR_QUIET)
if(NOT "${matching_refs}" STREQUAL "")
set(${__FETCHURL_VARIABLE} ${__ORIGIN_PREFIX}/${__REPO_NAME} PARENT_SCOPE)
return()
@@ -644,8 +644,12 @@ if(testing)
endif()
if(DEFINED repo_dir)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${repo_dir}/.git
- remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE)
-
+ remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE
+ RESULT_VARIABLE query_result
+ ERROR_VARIABLE query_error)
+ if(NOT query_result EQUAL 0)
+ message(STATUS "Searching for \"origin\" repo of roottest: ${query_error}")
+ endif()
else()
# The fetch URL of the 'origin' remote is used to determine the prefix for other repositories by
# removing the `/root(\.git)?` part. If `GITHUB_PR_ORIGIN` is defined in the environment, its
@@ -657,7 +661,7 @@ if(testing)
remote get-url origin OUTPUT_VARIABLE originurl OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
endif()
- string(REGEX REPLACE "/root(test)?(\.git)?$" "" originprefix ${originurl})
+ string(REGEX REPLACE "/root(test)?(\.git)?$" "" originprefix "${originurl}")
relatedrepo_GetClosestMatch(REPO_NAME roottest
ORIGIN_PREFIX ${originprefix} UPSTREAM_PREFIX ${upstreamprefix}
FETCHURL_VARIABLE roottest_url FETCHREF_VARIABLE roottest_ref)
@@ -690,7 +694,15 @@ if(testing)
endif()
if(LLVM_LINKER_IS_MOLD)
- message(FATAL_ERROR "The mold linker is not supported by ROOT. Please use a different linker")
+ execute_process(
+ COMMAND mold --version
+ OUTPUT_VARIABLE MOLD_VERSION
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ string(REGEX REPLACE "mold ([0-9]+\\.[0-9]+\\.[0-9]+).*" "\\1" MOLD_VERSION "${MOLD_VERSION}")
+ if(MOLD_VERSION VERSION_LESS "2.32.0")
+ message(FATAL_ERROR "The mold linker version ${MOLD_VERSION} is not supported by ROOT. Please use mold >= 2.32.0 or a different linker")
+ endif()
endif()
cmake_host_system_information(RESULT PROCESSOR QUERY PROCESSOR_DESCRIPTION)
diff --git a/README/ReleaseNotes/v634/index.md b/README/ReleaseNotes/v634/index.md
index 227f88c56f826..d9ad1613bb5cf 100644
--- a/README/ReleaseNotes/v634/index.md
+++ b/README/ReleaseNotes/v634/index.md
@@ -1,10 +1,15 @@
% ROOT Version 6.34 Release Notes
-% 2025-05
+% 2024-11
+## Important note about this development release
+
+6.34 is a short-term support cycle not meant to be used for data taking. It will be superseded by the 6.36 cycle, which is foreseen to start with 6.36.00 in the second quarter of 2025. Patch releases of the 6.34 cycle will be provided until June 30th, 2025.
+
+
## Introduction
-ROOT version 6.34.00 is scheduled for release at the end of May 2025.
+The development ROOT version 6.34.00 is scheduled for release at the end of November 2024.
For more information, see:
@@ -12,11 +17,10 @@ For more information, see:
The following people have contributed to this new version:
- Anton Alkin, Sungkyunkwan University\
Guilherme Amadio, CERN/IT,\
- Abhigyan Acherjee, University of Cincinnati,\
Bertrand Bellenot, CERN/EP-SFT,\
Jakob Blomer, CERN/EP-SFT,\
+ Patrick Bos, Netherlands eScience Center,\
Rene Brun,\
Carsten Burgard, DESY\
Will Buttinger, RAL,\
@@ -25,38 +29,40 @@ The following people have contributed to this new version:
Olivier Couet, CERN/EP-SFT,\
Marta Czurylo, CERN/EP-SFT,\
Monica Dessole, CERN/EP-SFT,\
+ Adrian Duesselberg, TU Munchen,\
Mattias Ellert, Uppsala University,\
Gerri Ganis, CERN/EP-SFT,\
Florine de Geus, CERN/University of Twente,\
Andrei Gheata, CERN/EP-SFT,\
- Bernhard Manfred Gruber,\
Enrico Guiraud,\
+ Stephan Hageboeck, CERN/EP-SFT,\
Jonas Hahnfeld, CERN/Goethe University Frankfurt,\
- Fernando Hueso Gonzalez, University of Valencia\
+ Fernando Hueso Gonzalez, University of Valencia,\
Attila Krasznahorkay, CERN/EP-ADP-OS,\
Wim Lavrijsen, LBL,\
- Valerii Kholoimov, National University of Kyiv/IRIS-HEP, \
- Dennis Klein, GSI,\
- Christoph Langenbruch, Heidelberg University/LHCb,\
+ Aaron Jomy, CERN/EP-SFT,\
+ Ida Kaspary, Imperial College,\
+ Valerii Kholoimov, National University of Kyiv/IRIS-HEP,\
Sergey Linev, GSI,\
Javier Lopez-Gomez,\
Pere Mato, CERN/EP-SFT,\
+ Andrea Maria Ola Mejicanos, Berea College,\
Alaettin Serhan Mete, Argonne,\
Thomas Madlener, DESY,\
+ Vedant Mehra, GSOC, \
Lorenzo Moneta, CERN/EP-SFT,\
Alja Mrak Tadel, UCSD/CMS,\
Axel Naumann, CERN/EP-SFT,\
- Dante Niewenhuis, VU Amsterdam\
- Luis Antonio Obis Aparicio, University of Zaragoza,\
Ianna Osborne, Princeton University,\
Vincenzo Eduardo Padulano, CERN/EP-SFT,\
+ Giacomo Parolini, CERN/EP-SFT,\
Danilo Piparo, CERN/EP-SFT,\
+ Kristupas Pranckietis, Vilnius University,\
Fons Rademakers, CERN/IT,\
Jonas Rembser, CERN/EP-SFT,\
Andrea Rizzi, University of Pisa,\
Andre Sailer, CERN/EP-SFT,\
- Garima Singh, ETH,\
- Juraj Smiesko, CERN/RCS-PRJ-FC,\
+ Nopphakorn Subsa-Ard, KMUTT,\
Pavlo Svirin, National Technical University of Ukraine,\
Robin Syring, Leibniz University Hannover, CERN/EP-SFT,\
Maciej Szymanski, Argonne,\
@@ -69,15 +75,26 @@ The following people have contributed to this new version:
Wouter Verkerke, NIKHEF/ATLAS,\
Stefan Wunsch\
-## Deprecation and Removal
+## Removal and Deprecation
+
+The following interfaces have been removed:
- The `RooAbsReal::plotSliceOn()` function that was deprecated since at least ROOT 6 was removed. Use `plotOn(frame,Slice(...))` instead.
+- Multiple overloads of internal Minuit 2 constructors and functions have been removed. If your code fails to compile, you can easily change to another overload that takes a `MnUserParameterState`, which is a change backwards compatible with older ROOT versions.
+
+The following interfaces are deprecated and will be removed in future releases:
+
- The `RooTemplateProxy` constructors that take a `proxyOwnsArg` parameter to manually pass ownership are deprecated and replaced by a new constructor that takes ownership via `std::unique_ptr`. They will be removed in ROOT 6.36.
- Several RooFit legacy functions are deprecated and will be removed in ROOT 6.36 (see section "RooFit libraries")
-- Multiple overloads of internal Minuit 2 constructors and functions have been removed. If your code fails to compile, you can easily change to another overload that takes a `MnUserParameterState`, which is a change backwards compatible with older ROOT versions.
+- The `int ROOT::CompressionSettings(ROOT::ECompressionAlgorithm algorithm, int compressionLevel)` function is deprecated and will be removed in ROOT 6.36. Please use `int CompressionSettings(RCompressionSetting::EAlgorithm::EValues algorithm, int compressionLevel)` instead (see the sketch after this list).
+- The `void R__zip(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep)` function is deprecated and will be removed in ROOT 6.36. Please use `void R__zipMultipleAlgorithm(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep, ROOT::RCompressionSetting::EAlgorithm::EValues algorithm)` instead.
+- The `Bool_t TGeoShape::AreOverlapping(const TGeoBBox *box1, const TGeoMatrix *mat1, const TGeoBBox *box2, const TGeoMatrix *mat2)` function is deprecated and will be removed in ROOT 6.36.
+- The `TPython::Eval()` function is deprecated and scheduled for removal in ROOT 6.36.
+
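+A minimal migration sketch for the deprecated `CompressionSettings` overload, assuming `kZSTD` and compression level 5 purely for illustration:
+
+```c++
+#include "Compression.h"
+#include "TFile.h"
+
+void set_compression()
+{
+   // Deprecated (removal planned for ROOT 6.36):
+   // int settings = ROOT::CompressionSettings(ROOT::kZSTD, 5);
+
+   // Replacement, using the new enum type:
+   int settings = ROOT::CompressionSettings(ROOT::RCompressionSetting::EAlgorithm::kZSTD, 5);
+
+   TFile f("out.root", "RECREATE", "", settings);
+}
+```
+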
## Core Libraries
+* The Cling C++ interpreter now relies on LLVM version 18.
* The `rootcling` invocation corresponding to a `genreflex` invocation can be obtained with the new `genreflex`
command line argument `--print-rootcling-invocation`. This can be useful when migrating from genreflex to
rootcling.
@@ -85,8 +102,46 @@ The following people have contributed to this new version:
## I/O Libraries
+## RNTuple Libraries
+
+* The first version of the `RNTuple` on-disk binary format is finalized. Future versions of ROOT will be able to read back
+ RNTuple data written as of this release. Please note that this version breaks compatibility with experimental RNTuple
+  data written with earlier, pre-6.34 releases. Please also note that the RNTuple API has not yet moved out of
+  `ROOT::Experimental`.
+* Support for low-precision on-disk floating point representation. This can be enabled through
+  `RField<float>::SetTruncated()` (truncated mantissa) and `RField<float>::SetQuantized()`
+  (scaled integer representation); see the sketch at the end of this list.
+* Link RNTuple self-description to the common ROOT streamer infrastructure. As a result, `TFile::MakeProject()`
+ properly creates header files for classes used in RNTuple data.
+* First version of the new `RNTupleProcessor` class. The `RNTupleProcessor` will support iteration of composed RNTuple data sets (comparable to and improving upon TTree friends and chains). This release supports chained (vertically composed) RNTuples. Other types of concatenations will be added in subsequent releases.
+* Support for cluster staging in the `RNTupleParallelWriter`. Cluster staging enables users to enforce a certain
+ logical cluster ordering in the presence of parallel cluster writing.
+* Support for Direct I/O for writing. This gives access to the peak performance of modern NVMe drives.
+* Support for a "streamer field" that can wrap classic ROOT I/O serialized data for RNTuple in cases where native
+ RNTuple support is not possible (e.g., recursive data structures). Use of the streamer field can be enforced
+  through the LinkDef option `rntupleStreamerMode(true)`. This feature is similar to the unsplit/level-0-split branch in `TTree`.
+* Naming rules have been established for the strings representing the name of an RNTuple and the name of a field. The
+ allowed character set is restricted to Unicode characters encoded as UTF-8, with the following exceptions: control
+ codes, full stop, space, backslash, slash. See a full description in the RNTuple specification. The naming rules are
+ also enforced when creating a new RNTuple or field for writing.
+* Many fixes to RNTuple merging, both through `hadd` and when using the `RNTupleMerger` class directly. Most notable
+ of these fixes is the proper handling of projected fields.
+* Many additional bug fixes and improvements.
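+
+A minimal writing sketch for the low-precision field options mentioned above, under assumed signatures (`SetQuantized(min, max, nBits)`, `SetTruncated(nBits)`); field and file names are illustrative:
+
+```c++
+#include <ROOT/RField.hxx>
+#include <ROOT/RNTupleModel.hxx>
+#include <ROOT/RNTupleWriter.hxx>
+
+#include <memory>
+#include <utility>
+
+void write_lowprec()
+{
+   using namespace ROOT::Experimental;
+   auto model = RNTupleModel::Create();
+
+   // Scaled-integer representation: 20 bits covering the range [0, 1000]
+   auto ptField = std::make_unique<RField<float>>("pt");
+   ptField->SetQuantized(0., 1000., 20);
+   model->AddField(std::move(ptField));
+
+   // Truncated mantissa: 16-bit floating point representation on disk
+   auto etaField = std::make_unique<RField<float>>("eta");
+   etaField->SetTruncated(16);
+   model->AddField(std::move(etaField));
+
+   auto writer = RNTupleWriter::Recreate(std::move(model), "ntpl", "lowprec.root");
+   // ... fill entries as usual; values are converted to the chosen representation on write
+}
+```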
## TTree Libraries
+* TTreeReader can now detect whether there is a mismatched number of entries between the main tree and a friend tree
+  and act accordingly in two distinct scenarios. In the first scenario, at least one of the friend trees is shorter than
+  the main tree, i.e. it has fewer entries. When the reader tries to load an entry from the main tree which is beyond
+  the last entry of the shorter friend, this results in an error and stops execution. In the second scenario, at
+  least one friend is longer than the main tree, i.e. it has more entries. Once the reader arrives at the end of the
+  main tree, it issues a warning informing the user that there are still entries to be read from the longer friend.
+* TTreeReader can now detect whether a branch that was previously expected to exist in the dataset has disappeared,
+  e.g. because the branch is missing when switching to the next file in a chain of files.
+* TTreeReader can now detect whether an entry being read is incomplete due to one of the following scenarios:
+ * When switching to a new tree in the chain, a branch that was expected to be found is not available.
+ * When doing event matching with TTreeIndex, one or more of the friend trees did not match the index value for
+ the current entry.
+
## RDataFrame
@@ -96,22 +151,67 @@ The following people have contributed to this new version:
code that was not yet available on the user's local application, but that would only become available in the
distributed worker. Now a call such as `df.Define("mycol", "return run_my_fun();")` needs to be at least declarable
to the interpreter also locally so that the column can be properly tracked.
+* The order of execution of operations within the same branch of the computation graph is now guaranteed to be top to
+ bottom. For example, the following code:
+ ~~~{.cpp}
+ ROOT::RDataFrame df{1};
+ auto df1 = df.Define("x", []{ return 11; });
+ auto df2 = df1.Define("y", []{ return 22; });
+ auto graph = df2.Graph("x","y");
+ ~~~
+ will first execute the operation `Define` of the column `x`, then the one of the column `y`, when filling the graph.
+* The `DefinePerSample` operation now works also in the case when a TTree is stored in a subdirectory of a TFile.
+* The memory usage of distributed RDataFrame was drastically reduced by better managing caches of the computation graph
+ artifacts. Large applications which previously had issues with killed executors due to being out of memory now show a
+ minimal memory footprint. See https://github.com/root-project/root/pull/16094#issuecomment-2252273470 for more details.
+* RDataFrame can now read TTree branches of type `std::array` on disk explicitly as `std::array` values in memory.
+* New parts of the API were added to allow dealing with missing data in a TTree-based dataset:
+ * DefaultValueFor(colname, defaultval): lets the user provide one default value for the current entry of the input
+ column, in case the value is missing.
+ * FilterAvailable(colname): works in the same way as the traditional Filter operation, where the "expression" is "is
+ the value available?". If so, the entry is kept, if not, it is discarded.
+ * FilterMissing(colname): works in the same way as the traditional Filter operation, where the "expression" is "is
+ the value missing?". If so, the entry is kept, if not, it is discarded.
+  The tutorials `df036_missingBranches` and `df037_TTreeEventMatching` show example usage of the new functionalities;
+  a minimal sketch is also given at the end of this section.
+* The automatic conversion of `std::vector` to `ROOT::RVec` which happens in memory within a JIT-ted RDataFrame
+ computation graph meant that the result of a `Snapshot` operation would implicitly change the type of the input branch.
+  A new option available as the data member `fVector2RVec` of the `RSnapshotOptions` struct can be used to prevent
+  RDataFrame from making this implicit conversion (also shown in the sketch at the end of this section).
+* RDataFrame does not take a lock anymore to check reading of supported types when there is a mismatch, see
+ https://github.com/root-project/root/pull/16528.
+* Complexity of lookups during internal checks for type matching has been made constant on average, see the discussion
+  at https://github.com/root-project/root/pull/16559.
+* Major improvements have been brought to the experimental feature that allows lazily loading ROOT data into batches for
+ machine learning model training pipelines. For a full description, see the presentation at CHEP 2024
+ https://indico.cern.ch/event/1338689/contributions/6015940/.
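+
+A minimal sketch of the missing-data operations and the `fVector2RVec` snapshot option described above; tree, file and column names are illustrative:
+
+```c++
+#include <ROOT/RDataFrame.hxx>
+
+void missing_data()
+{
+   // Hypothetical chain where some files lack the "pt" or "muon_id" branches
+   ROOT::RDataFrame df("events", {"file1.root", "file2.root"});
+
+   // Provide a default value for entries where "pt" cannot be read
+   auto withDefault = df.DefaultValueFor("pt", 0.f);
+
+   // Keep only entries where "muon_id" is available...
+   auto onlyAvailable = df.FilterAvailable("muon_id");
+   // ...or, conversely, only entries where it is missing
+   auto onlyMissing = df.FilterMissing("muon_id");
+
+   // Keep std::vector branches as std::vector in the Snapshot output
+   ROOT::RDF::RSnapshotOptions opts;
+   opts.fVector2RVec = false;
+   withDefault.Snapshot("events", "out.root", {"pt"}, opts);
+}
+```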
## Histogram Libraries
+* `THStack::GetMinimum()` returned an incorrect minimum when the stack contained negative bin contents. This is now fixed.
+
### Upgrade TUnfold to version 17.9
The [TUnfold package](https://www.desy.de/~sschmitt/tunfold.html) inside ROOT is upgraded from version 17.6 to version 17.9.
## Math Libraries
-### Usage of `std::span` in Minuit 2 interfaces
+### Minuit2
-To avoid forcing the user to do manual memory allocations via `std::vector`, the interfaces of Minuit 2 function adapter classes like `ROOT::Minuit2::FCNBase` or `ROOT::Minuit2::FCNGradientBase` were changed to accept `std::span` arguments instead of `std::vector const&`.
+* **Usage of `std::span` in the interface**: To avoid forcing the user to do manual memory allocations via `std::vector`, the interfaces of Minuit 2 function adapter classes like `ROOT::Minuit2::FCNBase` or `ROOT::Minuit2::FCNGradientBase` were changed to accept `std::span` arguments instead of `std::vector const&`.
This should have minimal impact on users, since one should usually use Minuit 2 via the `ROOT::Math::Minimizer` interface, which is unchanged.
+* **Initial error/covariance matrix values for the Hessian matrix**: Initial error/covariance matrix values can be passed to initialize the Hessian matrix used in the minimization algorithms by attaching the covariance matrix to the `ROOT::Minuit2::MnUserParameterState` instance used for seeding, via the method `AddCovariance(const MnUserCovariance &)`; see the sketch below.
+
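+A minimal seeding sketch for the `AddCovariance` method mentioned above, with two parameters and illustrative covariance values:
+
+```c++
+#include "Minuit2/MnUserCovariance.h"
+#include "Minuit2/MnUserParameterState.h"
+
+void seed_hessian()
+{
+   using namespace ROOT::Minuit2;
+   MnUserParameterState state;
+   state.Add("a", 1.0, 0.1); // name, value, error
+   state.Add("b", 2.0, 0.3);
+
+   MnUserCovariance cov(2); // 2x2 symmetric covariance matrix
+   cov(0, 0) = 0.01;
+   cov(1, 1) = 0.09;
+   cov(0, 1) = 0.005;
+
+   // Attach the covariance to the state used for seeding the minimization
+   state.AddCovariance(cov);
+}
+```
+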
## RooFit Libraries
+### Error handling in MultiProcess-enabled fits
+
+The `MultiProcess`-based fitting stack now handles errors during fits.
+Error signaling in (legacy) RooFit happens through two mechanisms: `logEvalError` calls and `RooNaNPacker`-enhanced NaN doubles.
+Both are now implemented and working for `MultiProcess`-based fits as well.
+See [this PR](https://github.com/root-project/root/pull/15797) for more details.
+This enables the latest ATLAS Higgs combination fits to complete successfully, and also other fits that encounter NaN values or other expected errors.
+
### Miscellaneous
* Setting `useHashMapForFind(true)` is not supported for RooArgLists anymore, since hash-assisted finding by name can be ambiguous: a RooArgList is allowed to have different elements with the same name. If you want to do fast lookups by name, convert your RooArgList to a RooArgSet.
@@ -120,6 +220,10 @@ This should have minimal impact on users, since one should usual use Minuit 2 vi
* The `ExportOnly()` attribute of the `RooStats::HistFactory::Measurement` object is now switched on by default, and the associated getter and setter functions are deprecated. They will be removed in ROOT 6.36. If you want to fit the model as well instead of just exporting it to a RooWorkspace, please do so with your own code as demonstrated in the `hf001` tutorial.
+* Initial error values can be used for initializing the Hessian matrix used in the Minuit2 minimization algorithms by setting the `RooMinimizer::Config` option `setInitialCovariance` to `true`. These values correspond to the diagonal entries of the initial covariance matrix; a sketch follows below.
+
+* `RooFit::MultiProcess`-enabled fitting developer/advanced documentation -- [available through GitHub](https://github.com/root-project/root/blob/master/roofit/doc/developers/test_statistics.md) -- was updated. It now contains the most up to date usage instructions for optimizing load balancing (and hence run speed) using this backend.
+
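+A minimal sketch of the `setInitialCovariance` option, assuming it is a data member of `RooMinimizer::Config` and that a negative log-likelihood `nll` (e.g. from `RooAbsPdf::createNLL`) is available:
+
+```c++
+// RooAbsReal &nll = ...; // assumed to exist
+RooMinimizer::Config cfg;
+cfg.setInitialCovariance = true; // seed the Hessian from the parameters' initial errors
+RooMinimizer minimizer(nll, cfg);
+minimizer.minimize("Minuit2");
+```
+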
### Deprecations
* The `RooStats::MarkovChain::GetAsDataSet` and `RooStats::MarkovChain::GetAsDataHist` functions are deprecated and will be removed in ROOT 6.36. The same functionality can be implemented by calling `RooAbsData::reduce` on the Markov Chain's `RooDataSet*` (obtained using `MarkovChain::GetAsConstDataSet`) and then obtaining its binned clone (for `RooDataHist`).
@@ -144,33 +248,97 @@ They should be replaced with the suitable alternatives interfaces:
- `RooAbsArg::checkDependents()`: use `checkObservables()`
- `RooAbsArg::recursiveCheckDependents()`: use `recursiveCheckObservables()`
+## TMVA SOFIE
+Support for new ONNX operators has been included in the SOFIE ONNX parser and in RModel, in order to generate inference code for new types of models.
+The full list of currently supported operators is available [here](https://github.com/root-project/root/blob/master/tmva/sofie/README.md#supported-onnx-operators).
+
+The list of operators added for this release is the following:
+ - Constant and ConstantOfShape
+ - If
+ - Range
+ - ReduceSum
+ - Split
+ - Tile
+ - TopK
+
+In addition, RModel can now generate code with dynamic input shape parameters, such as the batch size. These input shape parameters can be specified at run time when evaluating the model.
+Since not all ONNX operators in SOFIE support dynamic input parameters yet, it is possible to initialize a parsed dynamic model with fixed values. For this, a new member function, `RModel::Initialize(const std::map<std::string, size_t> & inputParams, bool verbose = false)`, has been added; a sketch follows below.
+The RModel class has been extended to support sub-graphs (needed for the operator `If`), dynamic tensors and constant tensors (for example those defined by the operator `Constant`).
+
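+A minimal parsing sketch for the dynamic-shape workflow described above; the ONNX file name and the name of the dynamic shape parameter ("bs") are illustrative:
+
+```c++
+#include "TMVA/RModel.hxx"
+#include "TMVA/RModelParser_ONNX.hxx"
+
+void generate_sofie_code()
+{
+   using namespace TMVA::Experimental::SOFIE;
+   RModelParser_ONNX parser;
+   RModel model = parser.Parse("model.onnx");
+
+   // Fix the dynamic batch-size parameter before code generation
+   model.Initialize({{"bs", 32}});
+   model.Generate();
+   model.OutputGenerated("model.hxx");
+}
+```
+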
## Graphics Backends
+### Web-based TWebCanvas
+
+Support "haxis" draw option for histograms, allows superposition of several histograms drawn on the same pad with horizontal ty axis. Add `tutorials\webcanv\haxis.cxx` macro demonstrating new feature.
+
+Support "frame" draw option for several primitives like `TBox`, `TLine`, `TLatex`. This enforce clipping of such objects by
+frame border. Provide demo in `tutorials\webcanv\inframe.cxx` macro
+
+Provide a batch mode for image production with a headless browser. In this mode, the data for several canvases is collected together (in a batch) and then N images are produced with a single invocation of the web browser (Chrome or Firefox). For instance, after `TWebCanvas::BatchImageMode(100)`, the next 99 calls to the `TCanvas::SaveAs(filename)` method will not lead to the creation of image files, but with the following call all 100 images will be produced together. Alternatively, one can use the static `TCanvas::SaveAll()` method, which creates images for several canvases at once; a sketch follows below.
+
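+A minimal sketch of batch image production, assuming `BatchImageMode` is the static method named above and `makeCanvas` is a hypothetical canvas factory:
+
+```c++
+TWebCanvas::BatchImageMode(100); // collect up to 100 canvases per browser invocation
+for (int n = 0; n < 100; ++n) {
+   TCanvas *c = makeCanvas(n); // hypothetical
+   c->SaveAs(TString::Format("img%03d.png", n)); // files are written once the batch is full
+}
+```
+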
+Support multi-page PDF file creation with web-based canvases using the `svg2pdf.js` library. Both with native and web-based graphics one can now do:
+```c++
+c1->SaveAs("file.pdf[")
+c2->SaveAs("file.pdf+")
+c3->SaveAs("file.pdf+")
+c4->SaveAs("file.pdf]")
+```
+Or same can be achieved with:
+```c++
+TCanvas::SaveAll({c1, c2, c3, c4}, "file.pdf");
+```
+
## 2D Graphics Libraries
+* In `TGraphErrors`, `TGraphAsymmErrors` and `TGraphBentErrors`, the error bars were drawn inside the marker when the marker was bigger than the error bars, which produced confusing plots. This is now fixed.
+
+* When error bars exceeded the y-range limits, the ends of the error bars were nevertheless displayed at the bottom and top of the frame, so they looked like complete error bars when they were in fact truncated. This is now fixed.
+* Choosing an appropriate color scheme is essential for making results easy to understand and interpret. Factors like colorblindness and converting colors to grayscale for publications can impact accessibility. Furthermore, results should be aesthetically pleasing. The following three color schemes, recommended by M. Petroff in [arXiv:2107.02270v2](https://arxiv.org/pdf/2107.02270) and available on [GitHub](https://github.com/mpetroff/accessible-color-cycles) under the MIT License, meet these criteria.
+
+* Properly implement the TScatter palette attributes, as requested [here](https://github.com/root-project/root/issues/15922).
+
+* Add `TStyle::SetLegendFillStyle`
## 3D Graphics Libraries
+### REve
+* Update RenderCore rendering engine to version 1.6 with improved
+implementation of Signed Distance Field (SDF) fonts.
-## Geometry Libraries
+* Implement REveText element to draw text with SDF fonts in screen or
+world coordinates. See the new example in tutorials/eve7/texts.C
+* Add initial version of REve overlays: a 2D area in screen coordinates
+that can draw text and frames in relative proportions; support position
+and scale editing on the client side.
-## Database Libraries
+* Draw axis labels with SDF fonts in the mixed space-screen coordinate
+system.
+* Introduce REveGeoTopNode: a wrapper over a TGeoNode, possibly
+displaced with a global transformation stored in REveElement. It holds a
+pointer to TGeoManager and controls for steering of TGeoPainter
+(fVisOption, fVisLevel and fMaxVisNodes).
-## Networking Libraries
+* Integrate the JSROOT hierarchical node browser in REve as the REveGeoTable
+element. A demonstration of this feature is included in the example
+tutorials/eve7/eveGeoBrowser.C
-## GUI Libraries
+## Geometry Libraries
+The geometry package is now optional, although still enabled by default in the CMake configuration. To disable it, use the `-Dgeom=OFF` CMake option.
-## Montecarlo Libraries
+## Web-based GUIs
+Adjust the `rootssh` script to be usable on macOS, fixing a problem with starting more than one web widget on the remote node.
-## PROOF Libraries
+Fix the `rootbrowse` script so that it can properly be used with all kinds of web widgets. Provide the `--web=` argument as for the
+regular root executable.
+Update the openui5 library to version 1.128.0. This requires a modern web browser; IE support is dropped.
-## PyROOT
+## Python Interface
### Typesafe `TTree::SetBranchAddress()` for array inputs
@@ -207,19 +375,317 @@ std::any result;
TPython::Exec("_anyresult = ROOT.std.make_any['std::string']('done')", &result);
std::cout << std::any_cast<std::string>(result) << std::endl;
```
-
-## Language Bindings
-
-
## JavaScript ROOT
+Upgrade to JSROOT 7.8.0 with the following new features and fixes:
+
+1. Let use custom time zone for time display, support '&utc' and '&cet' in URL parameters
+2. Support gStyle.fLegendFillStyle
+3. Let change histogram min/max values via context menu
+4. Support Z-scale zooming with `TScatter`
+5. Implement "haxis" draw option for histogram to draw only axes for hbar
+6. Implement "axisg" and "haxisg" to draw axes with grids
+7. Support `TH1` marker, text and line drawing superimposed with "haxis"
+8. Support `TBox`, `TLatex`, `TLine`, `TMarker` drawing on "frame", support drawing on swapped axes
+9. Implement `TProfile` and `TProfile2D` projections https://github.com/root-project/root/issues/15851
+10. Draw total histogram from `TEfficiency` when draw option starts with 'b'
+11. Let redraw `TEfficiency`, `THStack` and `TMultiGraph` with different draw options via hist context menu
+12. Support 'pads' draw options for `TMultiGraph`, support context menu for it
+13. Let drop objects on sub-pads
+14. Properly loads ES6 modules for web canvas
+15. Improve performance of `TH3`/`RH3` drawing by using `THREE.InstancedMesh`
+16. Implement batch mode with '&batch' URL parameter to create SVG/PNG images with default GUI
+17. Adjust node.js implementation to produce identical output with normal browser
+18. Create necessary infrastructure for testing with 'puppeteer'
+19. Support injection of ES6 modules via '&inject=path.mjs'
+20. Using importmap for 'jsroot' in all major HTML files and in demos
+21. Implement `settings.CutAxisLabels` flag to remove labels which may exceed graphical range
+22. Let disable usage of `TAxis` custom labels via context menu
+23. Let configure default draw options via context menu, preserved in the local storage
+24. Let save canvas as JSON file from context menu, object as JSON from inspector
+25. Upgrade three.js r162 -> r168, use r162 only in node.js because of "gl" module
+26. Create unified svg2pdf/jspdf ES6 modules, integrate in jsroot builds
+27. Let create multi-page PDF document - in `TWebCanvas` batch mode
+28. Let add in latex external links via `#url[link]{label}` syntax - including jsPDF support
+29. Support `TAttMarker` style with line width bigger than 1
+30. Provide link to ROOT class documentation from context menus
+31. Implement axis labels and title rotations on lego plots
+32. Internals - upgrade to eslint 9
+33. Internals - do not select pad (aka gPad) for objects drawing, always use assigned pad painter
+34. Fix - properly save zoomed ranges in drawingJSON()
+35. Fix - properly redraw `TMultiGraph`
+36. Fix - show empty bin in `TProfile2D` if it has entries #316
+37. Fix - unzooming on log scale was extending range forever
+38. Fix - display empty hist bin if fSumw2 not zero
+39. Fix - geometry display on android devices
+
+JSROOT is now used as the default display in `jupyter`.
+
+
+## Tools
+
+### hadd
+
+* Fixed a bug where in some circumstances `hadd` would not correctly merge objects in nested folders of a ROOT file.
+
## Tutorials
+* New tutorials [accessiblecolorschemes.C](https://root.cern/doc/master/accessiblecolorschemes_8C.html) and [hstackcolorscheme.C](https://root.cern/doc/master/thstackcolorscheme_8C.html).
## Class Reference Guide
## Build, Configuration and Testing Infrastructure
-
+- Coverage of the CI was greatly improved: Clang builds, Alma9 ARM64 builds and Alma9 x86 NVidia GPU builds were added to the CI.
+
+The following builtins have been updated:
+
+- Davix 0.8.7
+- XRootD 5.7.1
+
+## Bugs and Issues fixed in this release
+
+More than 200 items were addressed for this release. The full list is:
+
+* [[#17040](https://github.com/root-project/root/issues/17040)] - Small difference between kp6Violet implementation and official value from Petroff paper
+* [[#16976](https://github.com/root-project/root/issues/16976)] - Strange overflow bin bar when plotting TH1D with X1 option
+* [[#16946](https://github.com/root-project/root/issues/16946)] - Crash in RDF constructor with empty file list
+* [[#16942](https://github.com/root-project/root/issues/16942)] - another crash in finalization
+* [[#16834](https://github.com/root-project/root/issues/16834)] - `RFieldBase::Create` does not enforce valid field names
+* [[#16826](https://github.com/root-project/root/issues/16826)] - RNTuple unexpected "field iteration over empty fields is unsupported"
+* [[#16796](https://github.com/root-project/root/issues/16796)] - RooBinSamplingPdf does not forward expectedEventsFunc creation calls
+* [[#16784](https://github.com/root-project/root/issues/16784)] - Remove default value of p from TH1::GetQuantiles() as is the case with TF1::GetQuantiles
+* [[#16771](https://github.com/root-project/root/issues/16771)] - copying a default constructed `TH2Poly` fails.
+* [[#16753](https://github.com/root-project/root/issues/16753)] - [ntuple] Free uncompressed page buffers in RPageSinkBuf with IMT
+* [[#16752](https://github.com/root-project/root/issues/16752)] - [ntuple] Copy sealed page in RPageSinkBuf after compression
+* [[#16736](https://github.com/root-project/root/issues/16736)] - Please improve documentation and/or argument names for TH1::GetQuantiles()
+* [[#16715](https://github.com/root-project/root/issues/16715)] - TMVA fails to link to cudnn
+* [[#16687](https://github.com/root-project/root/issues/16687)] - Loss of floating point precision when saving TCanvas as ROOT macro
+* [[#16680](https://github.com/root-project/root/issues/16680)] - TMVA/Sofie tutorials used same name for generated files but are run in parallel.
+* [[#16647](https://github.com/root-project/root/issues/16647)] - ROOT_ADD_PYUNITTEST and ROOT_ADD_GTEST are naming tests inconsistently.
+* [[#16600](https://github.com/root-project/root/issues/16600)] - TMVA RReader not multithread safe
+* [[#16588](https://github.com/root-project/root/issues/16588)] - Fix RFieldBase::GetNElements() for record/class fields
+* [[#16562](https://github.com/root-project/root/issues/16562)] - TTreeViewer save session absolute path
+* [[#16523](https://github.com/root-project/root/issues/16523)] - OpenGL doesn't work on macosx
+* [[#16513](https://github.com/root-project/root/issues/16513)] - [ntuple] Clarifications about late schema extension
+* [[#16479](https://github.com/root-project/root/issues/16479)] - Add THStack/TH1 constructor for TRatioPlot
+* [[#16475](https://github.com/root-project/root/issues/16475)] - Unable to use EOS tokens with RDataFrame since 6.32
+* [[#16474](https://github.com/root-project/root/issues/16474)] - Hadd does not add correctly histograms in nested folders
+* [[#16469](https://github.com/root-project/root/issues/16469)] - cppyy no aggregate initialization constructor
+* [[#16419](https://github.com/root-project/root/issues/16419)] - RooUnblindOffset crashes for root version 6.32
+* [[#16402](https://github.com/root-project/root/issues/16402)] - Importing ROOT prevents Python garbage collection
+* [[#16374](https://github.com/root-project/root/issues/16374)] - Configuring with builtin xrootd can fail because of seemingly not found OpenSSL library
+* [[#16366](https://github.com/root-project/root/issues/16366)] - Compiler warning in Bytes.h: casts away qualifiers
+* [[#16360](https://github.com/root-project/root/issues/16360)] - [rdf] gcc14 issue warning in `RDF/InterfaceUtils.hxx`
+* [[#16326](https://github.com/root-project/root/issues/16326)] - [ntuple] Better control of cluster ordering for parallel writes
+* [[#16324](https://github.com/root-project/root/issues/16324)] - [ntuple] Allow for creating bare model from on-disk info
+* [[#16321](https://github.com/root-project/root/issues/16321)] - [ntuple] Split RNTupleView in two classes
+* [[#16298](https://github.com/root-project/root/issues/16298)] - [PyROOT] Conversion from `std::string` to `std::string_view` broken in 6.32
+* [[#16290](https://github.com/root-project/root/issues/16290)] - [ntuple] Provide tutorial for (envisioned) framework usage
+* [[#16252](https://github.com/root-project/root/issues/16252)] - tutorial-rcanvas-df104-py
+* [[#16249](https://github.com/root-project/root/issues/16249)] - Iterating with a range for does one extra iteration
+* [[#16244](https://github.com/root-project/root/issues/16244)] - JSROOT not drawing bins with content=0 but entries > 0 in TProfile2D
+* [[#16241](https://github.com/root-project/root/issues/16241)] - [ntuple] Method to prepare cluster commit / flush column write buffers
+* [[#16236](https://github.com/root-project/root/issues/16236)] - [ntuple] Improve field token usage for parallel writing
+* [[#16219](https://github.com/root-project/root/issues/16219)] - Module map on the new XCode version for macos15-beta
+* [[#16190](https://github.com/root-project/root/issues/16190)] - TFileMerger behaviour when the directory structure contains repeated names
+* [[#16184](https://github.com/root-project/root/issues/16184)] - Serialisation (and therefore I/O) issues with TF1 and TFitResultPtr
+* [[#16167](https://github.com/root-project/root/issues/16167)] - TGeomPainter Web not behaving the same way as TGeomPainter ROOT
+* [[#16149](https://github.com/root-project/root/issues/16149)] - CMake and xrootd builtin
+* [[#16135](https://github.com/root-project/root/issues/16135)] - [ntuple] Cannot create RFieldBase for signed char
+* [[#16124](https://github.com/root-project/root/issues/16124)] - RNTupleInspector returns wrong compressed size for large N-tuples
+* [[#16121](https://github.com/root-project/root/issues/16121)] - Potential memory leak in clang triggered by `findScope`
+* [[#16051](https://github.com/root-project/root/issues/16051)] - TColor::GetFreeColorIndex() returns index that is already used
+* [[#16047](https://github.com/root-project/root/issues/16047)] - TMVA SOFIE shadow declaration
+* [[#16031](https://github.com/root-project/root/issues/16031)] - VecOps binary functions not using the right types
+* [[#16024](https://github.com/root-project/root/issues/16024)] - `thisroot.sh` tries to drop the wrong lib paths from the existing environment
+* [[#15977](https://github.com/root-project/root/issues/15977)] - [gui] Event StatusBar does not work well when TMarker outside of zoom region
+* [[#15962](https://github.com/root-project/root/issues/15962)] - outdated help links
+* [[#15959](https://github.com/root-project/root/issues/15959)] - [RF] Make Offset(“bin”) usable for CLs method
+* [[#15948](https://github.com/root-project/root/issues/15948)] - Tex Gyre fonts has a bad side effect ...
+* [[#15924](https://github.com/root-project/root/issues/15924)] - python -c 'import ROOT' fails on macOS if ROOT is built with gnuinstall=ON
+* [[#15919](https://github.com/root-project/root/issues/15919)] - Problem with TClass::GetListOfAllPublicMethods() in python
+* [[#15912](https://github.com/root-project/root/issues/15912)] - Clad issues with `MacOSX15.0.sdk`
+* [[#15887](https://github.com/root-project/root/issues/15887)] - Broken plot .C macros for default Name() argument in plotOn()
+* [[#15883](https://github.com/root-project/root/issues/15883)] - Initialize TRatioPlot margins from Pad margins set in the current style
+* [[#15851](https://github.com/root-project/root/issues/15851)] - Support for TProfile and TProfile2D projectionX and projectionXY options in JSROOT
+* [[#15774](https://github.com/root-project/root/issues/15774)] - [ci] Add Python version to Windows precomplied release title or filename
+* [[#15756](https://github.com/root-project/root/issues/15756)] - [RF][HS3] ATLAS ttbar workspaces roundtrip
+* [[#15740](https://github.com/root-project/root/issues/15740)] - `THStack` does not automatically shows negative bins
+* [[#15738](https://github.com/root-project/root/issues/15738)] - Segmentation violation during build on ix86 (32 bit intel)
+* [[#15736](https://github.com/root-project/root/issues/15736)] - [df] ProgressBar reporting on number of files is now broken
+* [[#15727](https://github.com/root-project/root/issues/15727)] - Windows CMake project cannot find_library() after integrating with ROOT.
+* [[#15703](https://github.com/root-project/root/issues/15703)] - Leaking memory though strings in PyROOT
+* [[#15686](https://github.com/root-project/root/issues/15686)] - JITted code changes the execution order of computation graph nodes
+* [[#15666](https://github.com/root-project/root/issues/15666)] - [ntuple][doc] document RNTuple Anchor format
+* [[#15661](https://github.com/root-project/root/issues/15661)] - [ntuple] Cannot properly read late model extension (meta)data
+* [[#15643](https://github.com/root-project/root/issues/15643)] - TGFileContainer crashes in pyroot
+* [[#15617](https://github.com/root-project/root/issues/15617)] - `RDF::Describe` returns an incorrect file count
+* [[#15590](https://github.com/root-project/root/issues/15590)] - Infinite recursion in TFile::Open
+* [[#15537](https://github.com/root-project/root/issues/15537)] - [cling] Crash when non-void function does not return a value
+* [[#15534](https://github.com/root-project/root/issues/15534)] - RNTuple: fields with mixed STL types sometimes fail to be filled
+* [[#15511](https://github.com/root-project/root/issues/15511)] - Possible memory corruption in cling
+* [[#15503](https://github.com/root-project/root/issues/15503)] - Allow users to change default Snapshot behaviour of collections
+* [[#15460](https://github.com/root-project/root/issues/15460)] - TEnum::GetEnum("B")->GetUnderlyingType() does not following typedefs
+* [[#15447](https://github.com/root-project/root/issues/15447)] - `-Dminimal=ON` disables `runtime_cxxmodules`
+* [[#15442](https://github.com/root-project/root/issues/15442)] - Distributed RDataFrame does not see all defined column names
+* [[#15425](https://github.com/root-project/root/issues/15425)] - TTreeProcessorMP processes events multiple times when there are more threads than entries
+* [[#15419](https://github.com/root-project/root/issues/15419)] - RNTuple: add max key length field to RNTuple anchor
+* [[#15407](https://github.com/root-project/root/issues/15407)] - `cling::utils::Lookup::Named` does not look into using directive
+* [[#15406](https://github.com/root-project/root/issues/15406)] - `TEnum::GetEnum` does not seem to see 'through' using statements.
+* [[#15405](https://github.com/root-project/root/issues/15405)] - [RF] ExternalConstraints documentation incorrect for RooMCStudy
+* [[#15384](https://github.com/root-project/root/issues/15384)] - GetCppName: Mangled version of the C++ symbol
+* [[#15336](https://github.com/root-project/root/issues/15336)] - [MSVC] ROOT_x86 failed due to libCling.exp : error LNK2001: unresolved external symbol "char const * __cdecl __std_find_trivial(char const *,char const *,char)
+* [[#15321](https://github.com/root-project/root/issues/15321)] - [MSVC] Root is failed with error G694476FC: static_assert failed "Unexpected size"
+* [[#15285](https://github.com/root-project/root/issues/15285)] - Fast element setter/getter for TMatrixT/TVectorT classes
+* [[#15270](https://github.com/root-project/root/issues/15270)] - MakeClass and MakeSelector fails with special character in branchname.
+* [[#15269](https://github.com/root-project/root/issues/15269)] - Iterators in pyROOT working differently in ROOT master compared to 6.30/02
+* [[#15213](https://github.com/root-project/root/issues/15213)] - cmake warning while configuring
+* [[#15178](https://github.com/root-project/root/issues/15178)] - ROOT generates CMake warnings when building from the tarball
+* [[#15118](https://github.com/root-project/root/issues/15118)] - jsoninterface does not build if provided with RapidYAML
+* [[#15107](https://github.com/root-project/root/issues/15107)] - [ci] clang-format fails when adding commits
+* [[#15090](https://github.com/root-project/root/issues/15090)] - TClass::GetClassInfo() is not thread safe
+* [[#15039](https://github.com/root-project/root/issues/15039)] - [RDataFrame] Expose more local df operations for distributed RDF
+* [[#14966](https://github.com/root-project/root/issues/14966)] - Fix print check for object that return different types for begin() and end()
+* [[#14871](https://github.com/root-project/root/issues/14871)] - [ntuple] add streamer info records to TFile
+* [[#14809](https://github.com/root-project/root/issues/14809)] - [ntuple] Incorrect treatment of unsplittable classes
+* [[#14808](https://github.com/root-project/root/issues/14808)] - [ntuple] TObject serialization faulty
+* [[#14789](https://github.com/root-project/root/issues/14789)] - interpreter fails with assertion in debug builds on ARM when upgrading gcc
+* [[#14767](https://github.com/root-project/root/issues/14767)] - rootn.exe instant crash on startup
+* [[#14710](https://github.com/root-project/root/issues/14710)] - `std::set` not working in Windows PyROOT
+* [[#14697](https://github.com/root-project/root/issues/14697)] - [FreeBSD] davix build failure
+* [[#14592](https://github.com/root-project/root/issues/14592)] - Error value and context of call to FT_Set_Char_Size in TTF::SetTextSize should be in error message
+* [[#14561](https://github.com/root-project/root/issues/14561)] - [ROOT-4936] TMatrixTSym is not actually symmetric
+* [[#14544](https://github.com/root-project/root/issues/14544)] - [ROOT-8515] Make TEntryList class reference relevant
+* [[#14541](https://github.com/root-project/root/issues/14541)] - [ROOT-6193] Editor for palette axis cannot set title properties
+* [[#14487](https://github.com/root-project/root/issues/14487)] - Assert when trying to write RNTuple to full disk
+* [[#14217](https://github.com/root-project/root/issues/14217)] - Module merge problems with GCC 13, C++20, Pythia8
+* [[#14173](https://github.com/root-project/root/issues/14173)] - Adding a couple of useful methods in THnD
+* [[#14132](https://github.com/root-project/root/issues/14132)] - Lazy multithread RDataFrame::Snapshot cause unnessary warning and break gDirectory
+* [[#14055](https://github.com/root-project/root/issues/14055)] - Failing build with `-Dasan=ON` and memory leak in minimal build
+* [[#13729](https://github.com/root-project/root/issues/13729)] - [math] Contour method has some problems with Minuit2
+* [[#13677](https://github.com/root-project/root/issues/13677)] - [Cling] Potential unloading issue which breaks distributed execution
+* [[#13511](https://github.com/root-project/root/issues/13511)] - TMapFile can't work
+* [[#13498](https://github.com/root-project/root/issues/13498)] - Assertion failure in TMVA `can't dereference value-initialized vector iterator`
+* [[#13481](https://github.com/root-project/root/issues/13481)] - Update doc to express deprecation of genreflex and usage of rootcling as a replacement
+* [[#13432](https://github.com/root-project/root/issues/13432)] - TCling::AutoLoad may not work if a pcm linked to the library is not preloaded
+* [[#13055](https://github.com/root-project/root/issues/13055)] - -Dtmva-sofie=OFF does not switch off sofie.
+* [[#13016](https://github.com/root-project/root/issues/13016)] - Extra vertical space on a canvas when CanvasPreferGL is set to true, reproducible via SSH
+* [[#12935](https://github.com/root-project/root/issues/12935)] - [RF] Global correlation coefficients after SumW2Error
+* [[#12842](https://github.com/root-project/root/issues/12842)] - [ntuple] Review the column representation of nullable fields
+* [[#12509](https://github.com/root-project/root/issues/12509)] - TClass prefers ` over `` specialization
+* [[#12460](https://github.com/root-project/root/issues/12460)] - [ntuple] Set non-negative column flag for unsigned integer fields
+* [[#12428](https://github.com/root-project/root/issues/12428)] - Test failure in RNTuple: RNTuple.TClassEBO fails
+* [[#12426](https://github.com/root-project/root/issues/12426)] - RNTuple endian issues
+* [[#12334](https://github.com/root-project/root/issues/12334)] - TTreeReader fails to read `T` as `T`
+* [[#12272](https://github.com/root-project/root/issues/12272)] - CI: releases
+* [[#12251](https://github.com/root-project/root/issues/12251)] - Problems with `TH1::GetQuantiles`
+* [[#12182](https://github.com/root-project/root/issues/12182)] - TPython::Eval does not work with string with python3.8+ for ROOT 6.24-6.26.8
+* [[#12136](https://github.com/root-project/root/issues/12136)] - [ntuple] `RNTupleView`'s move ctor causes double delete
+* [[#12108](https://github.com/root-project/root/issues/12108)] - `constexpr` function return incorrect value in Windows
+* [[#11749](https://github.com/root-project/root/issues/11749)] - Remove empty files from the source distribution tarball
+* [[#11707](https://github.com/root-project/root/issues/11707)] - Crash when macro is named main.cpp
+* [[#11603](https://github.com/root-project/root/issues/11603)] - Disable automatic 'call home' in cmake when not needed
+* [[#11353](https://github.com/root-project/root/issues/11353)] - Compiled program with libNew.so crash
+* [[#10317](https://github.com/root-project/root/issues/10317)] - [Doxygen] tutorials appear as namespaces
+* [[#10239](https://github.com/root-project/root/issues/10239)] - ? wildcard broken in TChain::Add()
+* [[#10010](https://github.com/root-project/root/issues/10010)] - TLeaf::ReadBasket invalid write in TMVA test
+* [[#9792](https://github.com/root-project/root/issues/9792)] - should fLogger be persistant ?
+* [[#9646](https://github.com/root-project/root/issues/9646)] - Numerically stable computation of invariant mass
+* [[#9637](https://github.com/root-project/root/issues/9637)] - `TGraph::Add(TF1 *f)` method like for `TH1`'s
+* [[#9445](https://github.com/root-project/root/issues/9445)] - Hit errors when build ROOT with msvc on AddressSanitizer mode
+* [[#9425](https://github.com/root-project/root/issues/9425)] - [RF] Figure out how to handle RooArgList with duplicates and hash-assisted find
+* [[#9188](https://github.com/root-project/root/issues/9188)] - Unnecessary (?) warnings reading `unique_ptr`
+* [[#9137](https://github.com/root-project/root/issues/9137)] - [tree] TTree/TChain silently return bogus data if friend is shorter than main tree
+* [[#8833](https://github.com/root-project/root/issues/8833)] - Crash reading >= 3D array in TTree via MakeClass in Windows ROOT6 compilation
+* [[#8828](https://github.com/root-project/root/issues/8828)] - Crash when defining something in the Detail namespace after a lookup of that namespace
+* [[#8815](https://github.com/root-project/root/issues/8815)] - TBB not inheriting CXXFLAGS
+* [[#8716](https://github.com/root-project/root/issues/8716)] - Minuit2: FCNGradientBase::CheckGradient() is ignored
+* [[#8704](https://github.com/root-project/root/issues/8704)] - [DF] Add support for 'missing' columns
+* [[#8367](https://github.com/root-project/root/issues/8367)] - *** Break *** segmentation violation in case of compilation errors in unnamed macros
+* [[#8194](https://github.com/root-project/root/issues/8194)] - TClass::GetStreamerInfo crashes for several classes
+* [[#8031](https://github.com/root-project/root/issues/8031)] - Reserve "build" directory name in ROOT sources for build files
+* [[#7875](https://github.com/root-project/root/issues/7875)] - [ntuple] Improve normalization of platform-specific primitives and typedefs
+* [[#7823](https://github.com/root-project/root/issues/7823)] - [RF] RooStatsUtils::MakeCleanWorkspace
+* [[#7713](https://github.com/root-project/root/issues/7713)] - [Tree] Bogus data silently read when trying to access an indexed friend TTree with an invalid index
+* [[#7160](https://github.com/root-project/root/issues/7160)] - MacOS: -Dcocoa=ON -Dopengl=OFF pass cmake but fail compilation
+* [[#7103](https://github.com/root-project/root/issues/7103)] - [RF] HistFactory::FlexibleInterpVar Interpolation code2 and code3 are the same
+* [[ROOT-10975](https://its.cern.ch/jira/browse/ROOT-10975)] - ACLiC should make rootcling warnings visible
+* [[ROOT-10908](https://its.cern.ch/jira/browse/ROOT-10908)] - SMatrix is written as a Double32_t
+* [[ROOT-10902](https://its.cern.ch/jira/browse/ROOT-10902)] - SMatrix read from TTree contains all zeroes
+* [[ROOT-10883](https://its.cern.ch/jira/browse/ROOT-10883)] - Warning in TBrowser when selecting "Add" method of a histogram
+* [[ROOT-10865](https://its.cern.ch/jira/browse/ROOT-10865)] - [RVec] No Doxygen documentation about arithmetic operators
+* [[ROOT-10698](https://its.cern.ch/jira/browse/ROOT-10698)] - Valgrind dies at assertion ‘!overlap’ failed
+* [[ROOT-10539](https://its.cern.ch/jira/browse/ROOT-10539)] - Slow tutorials/dataframe/df027_SQliteDependencyOverVersion.C
+* [[ROOT-10414](https://its.cern.ch/jira/browse/ROOT-10414)] - rootcling doesn't parse -isystem correctly
+* [[ROOT-10342](https://its.cern.ch/jira/browse/ROOT-10342)] - valuePrint 'forgets' template argument in type when printing about an assignment statement.
+* [[ROOT-10200](https://its.cern.ch/jira/browse/ROOT-10200)] - Automatic reloading doesn't work for std::cout on macOS
+* [[ROOT-9961](https://its.cern.ch/jira/browse/ROOT-9961)] - TTree::Print("toponly") inserts extra newline between listed items
+* [[ROOT-9953](https://its.cern.ch/jira/browse/ROOT-9953)] - TRint should not terminate on assert
+* [[ROOT-9919](https://its.cern.ch/jira/browse/ROOT-9919)] - TFile construction silently drops XRootD protocol
+* [[ROOT-9918](https://its.cern.ch/jira/browse/ROOT-9918)] - Crash TMVA by running (unused?) public function from TMVA::Factory
+* [[ROOT-9705](https://its.cern.ch/jira/browse/ROOT-9705)] - flag to disable (root)test(s) that uses remote files
+* [[ROOT-9673](https://its.cern.ch/jira/browse/ROOT-9673)] - Printout from TMinuit::mnrazz() cannot be suppressed
+* [[ROOT-9448](https://its.cern.ch/jira/browse/ROOT-9448)] - libNew returns nullptr instead of implementing operator new, has many warnings
+* [[ROOT-9420](https://its.cern.ch/jira/browse/ROOT-9420)] - CTest: Fail on warnings in tutorials
+* [[ROOT-9395](https://its.cern.ch/jira/browse/ROOT-9395)] - ROOTTEST_ADD_TEST does not complain if source file does not exist
+* [[ROOT-9354](https://its.cern.ch/jira/browse/ROOT-9354)] - [TTreeReader] Crash when reading array from in-memory tree
+* [[ROOT-9266](https://its.cern.ch/jira/browse/ROOT-9266)] - Cannot unload python code / shared library
+* [[ROOT-8991](https://its.cern.ch/jira/browse/ROOT-8991)] - Cling exports buggy include paths to ACLiC
+* [[ROOT-8775](https://its.cern.ch/jira/browse/ROOT-8775)] - TTree::MakeSelector can produce invalid C++ code
+* [[ROOT-8745](https://its.cern.ch/jira/browse/ROOT-8745)] - Reloading of code that uses R__LOAD_LIBRARY fails
+* [[ROOT-8519](https://its.cern.ch/jira/browse/ROOT-8519)] - Bug when using simple math functions in TTree::SetAlias()
+* [[ROOT-8271](https://its.cern.ch/jira/browse/ROOT-8271)] - roofit asymmetry plots create corrupted pdf when not providing a custom binning
+* [[ROOT-8256](https://its.cern.ch/jira/browse/ROOT-8256)] - Limit to complexity of TTreeFormula? - "Bad Numerical Expression"
+* [[ROOT-8240](https://its.cern.ch/jira/browse/ROOT-8240)] - Must not unload or reload cling runtime universe
+* [[ROOT-8078](https://its.cern.ch/jira/browse/ROOT-8078)] - Tab completion fails for lambda functions
+* [[ROOT-7137](https://its.cern.ch/jira/browse/ROOT-7137)] - Unsafe object ownership issue with TClonesArray/TObjArray
+* [[ROOT-6968](https://its.cern.ch/jira/browse/ROOT-6968)] - Interpretation of nparam argument to TMethodCall::SetParamPtrs changed in root 6
+* [[ROOT-6931](https://its.cern.ch/jira/browse/ROOT-6931)] - Tab completion of file names in directories with '+'
+* [[ROOT-6822](https://its.cern.ch/jira/browse/ROOT-6822)] - Dangerous behavior of TTreeFormula::EvalInstance64
+* [[ROOT-6313](https://its.cern.ch/jira/browse/ROOT-6313)] - TClingClassInfo::ClassProperty() might give wrong results
+* [[ROOT-5983](https://its.cern.ch/jira/browse/ROOT-5983)] - Add test for wrong data member in TBranchElement
+* [[ROOT-5963](https://its.cern.ch/jira/browse/ROOT-5963)] - Re-implement tab completion for ROOT
+* [[ROOT-5843](https://its.cern.ch/jira/browse/ROOT-5843)] - List of loaded libraries
+* [[ROOT-5439](https://its.cern.ch/jira/browse/ROOT-5439)] - Dump-output of TH1 not showing pointerness of fArray
+* [[ROOT-2345](https://its.cern.ch/jira/browse/ROOT-2345)] - Optimize TMatrixDSparse operation kAtA
+
+## Release 6.34.02
+
+Published on December 16, 2024
+
+### Items addressed in this release
+
+In addition to the items listed below, this release includes a few minor fixes in RDataFrame and RooFit. Moreover, the built-in Davix was patched to build with GCC 14 while waiting for the next Davix release.
+
+* [[#17145](https://github.com/root-project/root/issues/17145)] - Distributed RDataFrame cannot deal with same column name in different branches
+* [[#17190](https://github.com/root-project/root/issues/17190)] - Compiler error with GCC 14.2.0 related to Davix
+* [[#17222](https://github.com/root-project/root/issues/17222)] - Regression in Python ownership for histograms within subdirectories with ROOT 6.34.00
+* [[#17223](https://github.com/root-project/root/issues/17223)] - TFileMerger leaves files open resulting in corrupt metadata
+
+## Release 6.34.04
+
+Published on February 10, 2025
+
+### Items addressed in this release
+
+This release includes a few minor fixes in RDataFrame, and it adds a mechanism to specify binding options when opening sockets with TSocketServer.
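+
+A minimal sketch of how the new socket binding mechanism could look in user
+code; the option and enum names (`ESocketBindOption`, `kInaddrLoopback`) are
+assumptions for illustration only, not verified against the 6.34.04 headers:
+
+```cpp
+#include "TServerSocket.h"
+
+void serve_loopback()
+{
+   // Classic form: listen on port 9090 and allow address reuse.
+   TServerSocket server(9090, /*reuse=*/kTRUE);
+   // With the new mechanism, the bind address could be restricted,
+   // e.g. (assumed names):
+   // TServerSocket local(9090, kTRUE, TServerSocket::kDefaultBacklog, -1,
+   //                     ESocketBindOption::kInaddrLoopback);
+   if (server.IsValid()) {
+      TSocket *conn = server.Accept(); // blocks until a client connects
+      delete conn;
+   }
+}
+```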
+Moreover, the items listed below were addressed:
+
+* [[ROOT-7372](https://its.cern.ch/jira/browse/ROOT-7372)] - Accessing complex map branches crashes in PyROOT
+* [[ROOT-10482](https://its.cern.ch/jira/browse/ROOT-10482)] - pullHist and residHist biased
+* [[#12841](https://github.com/root-project/root/issues/12841)] - [ntuple] prefer IO constructor in RField::GenerateValue()
+* [[#14007](https://github.com/root-project/root/issues/14007)] - Cannot create a RNtuple into a TDirectory
+* [[#15473](https://github.com/root-project/root/issues/15473)] - Segmentation fault when building with the mold linker
+* [[#16189](https://github.com/root-project/root/issues/16189)] - TFile::k630forwardCompatibility does not apply to new files correctly
+* [[#16560](https://github.com/root-project/root/issues/16560)] - Issue using TColor and saving canvas to ROOT format
+* [[#17291](https://github.com/root-project/root/issues/17291)] - [RF] Parameter ordering bug in RooFormulaArgStreamer
+* [[#17305](https://github.com/root-project/root/issues/17305)] - The ONNX.Tile5D test in tmva/sofie/test/TestCustomModelsFromONNX.cxx writes array elements beyond the last element in the array
+* [[#17321](https://github.com/root-project/root/issues/17321)] - [RF] Unused Class rule
+* [[#17442](https://github.com/root-project/root/issues/17442)] - [Python] Regression in `std::bytes` support with cppyy inside ROOT
+* [[#17444](https://github.com/root-project/root/issues/17444)] - ROOT doesn't compile with gcc-15
+* [[#17472](https://github.com/root-project/root/issues/17472)] - RooEllipse not drawn in notebooks with `%jsroot on`
+
+## HEAD of the v6-34-00-patches branch
\ No newline at end of file
diff --git a/bindings/experimental/distrdf/python/DistRDF/HeadNode.py b/bindings/experimental/distrdf/python/DistRDF/HeadNode.py
index ab7cc9cdde7fb..69ec035624210 100644
--- a/bindings/experimental/distrdf/python/DistRDF/HeadNode.py
+++ b/bindings/experimental/distrdf/python/DistRDF/HeadNode.py
@@ -105,7 +105,7 @@ def __init__(self, backend: BaseBackend, npartitions: Optional[int], localdf: RO
# Internal RDataFrame object, useful to expose information such as
# column names.
- self._localdf = localdf
+ self.rdf_node = localdf
# A dictionary where the keys are the IDs of the objects to live visualize
# and the values are the corresponding callback functions
@@ -119,8 +119,8 @@ def __del__(self):
the garbage collector, the cppyy memory regulator and the C++ object
destructor.
"""
- if hasattr(self, "_localdf"):
- del self._localdf
+ if hasattr(self, "rdf_node"):
+ del self.rdf_node
@property
def npartitions(self) -> Optional[int]:
diff --git a/bindings/experimental/distrdf/python/DistRDF/Node.py b/bindings/experimental/distrdf/python/DistRDF/Node.py
index 7caf8c3df9bb7..fd85564dd2fa0 100644
--- a/bindings/experimental/distrdf/python/DistRDF/Node.py
+++ b/bindings/experimental/distrdf/python/DistRDF/Node.py
@@ -59,9 +59,12 @@ class Node(object):
rdf_node: A reference to the result of calling a function of the
RDataFrame API with the current operation. This is practically a
node of the true computation graph, which is being executed in some
- distributed task. It is a transient attribute. On the client, it
- is always None. The value is computed and stored only during a task
- on a worker.
+ distributed task. It is a transient attribute. On the client, this
+ is filled when the operation being called is a transformation. This
+ ensures that information such as column names and types is
+ populated and available locally with the right dependencies. On a
+ worker, this attribute can represent any node of the C++ computation
+ graph, and it is created and processed within the worker.
"""
def __init__(self, get_head: Callable[[], HeadNode], node_id: int = 0,
diff --git a/bindings/experimental/distrdf/python/DistRDF/Proxy.py b/bindings/experimental/distrdf/python/DistRDF/Proxy.py
index c8a8ccfc9a92d..21bf9555302b8 100644
--- a/bindings/experimental/distrdf/python/DistRDF/Proxy.py
+++ b/bindings/experimental/distrdf/python/DistRDF/Proxy.py
@@ -59,8 +59,12 @@ def execute_graph(node: Node) -> None:
def _update_internal_df_with_transformation(node:Node, operation: Operation) -> None:
"""Propagate transform operations to the headnode internal RDataFrame"""
- rdf_operation = getattr(node.get_head()._localdf, operation.name)
- node.get_head()._localdf = rdf_operation(*operation.args, **operation.kwargs)
+ # The parent node is None only if the node is the head node
+ parent_node = node.parent if node.parent is not None else node
+ # Retrieve correct C++ transformation to call
+ rdf_operation = getattr(parent_node.rdf_node, operation.name)
+ # Call and inject the result in the Python node
+ node.rdf_node = rdf_operation(*operation.args, **operation.kwargs)
def _create_new_node(parent: Node, operation: Operation.Operation) -> Node:
"""Creates a new node and inserts it in the computation graph"""
@@ -253,11 +257,11 @@ def __getattr__(self, attr):
def GetColumnNames(self):
"""Forward call to the internal RDataFrame object"""
- return self.proxied_node.get_head()._localdf.GetColumnNames()
+ return self.proxied_node.rdf_node.GetColumnNames()
def GetColumnType(self, column):
"""Forward call to the internal RDataFrame object"""
- return self.proxied_node.get_head()._localdf.GetColumnType(column)
+ return self.proxied_node.rdf_node.GetColumnType(column)
def _create_new_op(self, *args, **kwargs):
"""
diff --git a/bindings/experimental/distrdf/test/test_callable_generator.py b/bindings/experimental/distrdf/test/test_callable_generator.py
index 5fe9d53fa8893..a4911aaa28642 100644
--- a/bindings/experimental/distrdf/test/test_callable_generator.py
+++ b/bindings/experimental/distrdf/test/test_callable_generator.py
@@ -73,10 +73,10 @@ def test_mapper_from_graph(self):
node = Proxy.NodeProxy(hn)
# Set of operations to build the graph
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0").Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0").Filter("mock_col>0")
n4 = n2.Count()
n5 = n1.Count()
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Generate and execute the mapper
graph_dict = hn._generate_graph_dict()
@@ -107,10 +107,10 @@ def test_mapper_with_pruning(self):
# Set of operations to build the graph
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0").Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0").Filter("mock_col>0")
n4 = n2.Count()
n5 = n1.Count()
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Until here the graph would be:
# [1, 2, 2, 3, 3, 2]
@@ -152,11 +152,11 @@ def test_dfs_graph_with_pruning_transformations(self):
# Graph nodes
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
n3 = n2.Filter("mock_col>0")
n4 = n3.Count() # noqa: avoid PEP8 F841
n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Transformation pruning, n5 was earlier a transformation node
n5 = n1.Count() # noqa: avoid PEP8 F841
@@ -189,11 +189,11 @@ def test_dfs_graph_with_recursive_pruning(self):
# Graph nodes
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
n3 = n2.Filter("mock_col>0")
n4 = n3.Count() # noqa: avoid PEP8 F841
n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Remove user references from n4, n3, n2
n4 = n3 = n2 = None # noqa: avoid PEP8 F841
@@ -226,11 +226,11 @@ def test_dfs_graph_with_parent_pruning(self):
# Graph nodes
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
n3 = n2.Filter("mock_col>0")
n4 = n3.Count() # noqa: avoid PEP8 F841
n5 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Remove references from n2 (which shouldn't affect the graph)
n2 = None
@@ -265,12 +265,12 @@ def test_dfs_graph_with_computed_values_pruning(self):
# Graph nodes
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
n3 = n2.Filter("mock_col>0")
n4 = n3.Count() # noqa: avoid PEP8 F841
n5 = n1.Filter("mock_col>0")
n6 = n5.Count()
- n7 = node.Filter("mock_col>0")
+ n7 = n1.Filter("mock_col>0")
# This is to make sure action nodes with
# already computed values are pruned.
@@ -307,11 +307,11 @@ def test_dfs_graph_without_pruning(self):
# Graph nodes
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
n3 = n2.Filter("mock_col>0")
n4 = n3.Count() # noqa: avoid PEP8 F841
n5 = n1.Count() # noqa: avoid PEP8 F841
- n6 = node.Filter("mock_col>0") # noqa: avoid PEP8 F841
+ n6 = n1.Filter("mock_col>0") # noqa: avoid PEP8 F841
# Generate and execute the mapper
graph_dict = hn._generate_graph_dict()
@@ -340,7 +340,7 @@ def test_nodes_gt_python_recursion_limit(self):
node = Proxy.NodeProxy(hn)
# Create three branches
n1 = node.Define("mock_col", "1")
- n2 = node.Filter("mock_col>0")
+ n2 = n1.Filter("mock_col>0")
# Append 1000 nodes per branch
for i in range(1000):
n1 = n1.Define(f"mock_col_{i}", "1")
diff --git a/bindings/experimental/distrdf/test/test_proxy.py b/bindings/experimental/distrdf/test/test_proxy.py
index 2a780abdf56d3..45bab8ac86d86 100644
--- a/bindings/experimental/distrdf/test/test_proxy.py
+++ b/bindings/experimental/distrdf/test/test_proxy.py
@@ -109,12 +109,14 @@ def test_supported_transformation(self):
}
for transformation, args in transformations.items():
- newProxy = getattr(proxy, transformation)(*args)
- self.assertEqual(proxy.proxied_node._new_op_name, transformation)
- self.assertIsInstance(newProxy, Proxy.NodeProxy)
- self.assertEqual(newProxy.proxied_node.operation.name,
+ parent_node = proxy.proxied_node
+ proxy = getattr(proxy, transformation)(*args)
+ # Calling the operation on the parent node modifies an attribute
+ self.assertEqual(parent_node._new_op_name, transformation)
+ self.assertIsInstance(proxy, Proxy.NodeProxy)
+ self.assertEqual(proxy.proxied_node.operation.name,
transformation)
- self.assertEqual(newProxy.proxied_node.operation.args, args)
+ self.assertEqual(proxy.proxied_node.operation.args, args)
def test_node_attr_transformation(self):
"""
@@ -304,4 +306,31 @@ def test_get_column_type_after_define(self):
column_types.append(column_type)
self.assertSequenceEqual(column_types, ["double", "int"])
-
\ No newline at end of file
+
+ def test_columninfo_defines_twobranches(self):
+ """
+ Check new column names and types are available locally even if the same
+ column name is used in different branches of the computation graph.
+ """
+
+ node = create_dummy_headnode(1)
+ proxy = Proxy.NodeProxy(node)
+
+ cols_before = proxy.GetColumnNames()
+ self.assertSequenceEqual(cols_before, [])
+
+ expected_coltype_1 = "Long64_t"
+ branch_1 = proxy.Define("mycol", f"static_cast<{expected_coltype_1}>(42)")
+
+ expected_coltype_2 = "float"
+ branch_2 = proxy.Define("mycol", f"static_cast<{expected_coltype_2}>(33)")
+
+ cols_1 = branch_1.GetColumnNames()
+ self.assertSequenceEqual(cols_1, ["mycol"])
+ coltype_1 = branch_1.GetColumnType(cols_1[0])
+ self.assertEqual(coltype_1, expected_coltype_1)
+
+ cols_2 = branch_2.GetColumnNames()
+ self.assertSequenceEqual(cols_2, ["mycol"])
+ coltype_2 = branch_2.GetColumnType(cols_2[0])
+ self.assertEqual(coltype_2, expected_coltype_2)
diff --git a/bindings/jupyroot/python/JupyROOT/helpers/utils.py b/bindings/jupyroot/python/JupyROOT/helpers/utils.py
index 82836a07951b3..3552974f58d69 100644
--- a/bindings/jupyroot/python/JupyROOT/helpers/utils.py
+++ b/bindings/jupyroot/python/JupyROOT/helpers/utils.py
@@ -74,7 +74,7 @@
// We are in jupyter notebooks, use require.js which should be configured already
requirejs.config({{
- paths: {{ 'JSRootCore' : [ 'build/jsroot', 'https://root.cern/js/7.7.4/build/jsroot', 'https://jsroot.gsi.de/7.7.4/build/jsroot' ] }}
+ paths: {{ 'JSRootCore' : [ 'build/jsroot', 'https://root.cern/js/7.8.1/build/jsroot', 'https://jsroot.gsi.de/7.8.1/build/jsroot' ] }}
}})(['JSRootCore'], function(Core) {{
display_{jsDivId}(Core);
}});
@@ -97,7 +97,7 @@
// Try loading a local version of requirejs and fallback to cdn if not possible.
script_load_{jsDivId}(base_url + 'static/build/jsroot.js', function(){{
console.error('Fail to load JSROOT locally, please check your jupyter_notebook_config.py file');
- script_load_{jsDivId}('https://root.cern/js/7.7.4/build/jsroot.js', function(){{
+ script_load_{jsDivId}('https://root.cern/js/7.8.1/build/jsroot.js', function(){{
document.getElementById("{jsDivId}").innerHTML = "Failed to load JSROOT";
}});
}});
diff --git a/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h b/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h
index 760443c17380b..bd098f6917fa0 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h
+++ b/bindings/pyroot/cppyy/CPyCppyy/include/CPyCppyy/DispatchPtr.h
@@ -64,7 +64,7 @@ class CPYCPPYY_CLASS_EXTERN DispatchPtr {
}
private:
- PyObject* Get() const;
+ PyObject* Get(bool borrowed=true) const;
private:
PyObject* fPyHardRef;
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx
index f1e4a336bd825..4c28b7a91ff2c 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/API.cxx
@@ -413,7 +413,16 @@ void CPyCppyy::ExecScript(const std::string& name, const std::vectortp_new((PyTypeObject*)pytype, args, nullptr);
@@ -133,7 +140,7 @@ static PyObject* enum_ctype(PyObject* cls, PyObject* args, PyObject* kwds)
CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppScope_t scope)
{
// Create a new enum type based on the actual C++ type. Enum values are added to
-// the type by may also live in the enclosing scope.
+// the type but may also live in the enclosing scope.
CPPEnum* pyenum = nullptr;
@@ -190,8 +197,13 @@ CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppSco
// collect the enum values
Cppyy::TCppIndex_t ndata = Cppyy::GetNumEnumData(etype);
+ bool values_ok = true;
for (Cppyy::TCppIndex_t idata = 0; idata < ndata; ++idata) {
PyObject* val = pyval_from_enum(resolved, pyenum, pyside_type, etype, idata);
+ if (!val) {
+ values_ok = false;
+ break;
+ }
PyObject* pydname = CPyCppyy_PyText_FromString(Cppyy::GetEnumDataName(etype, idata).c_str());
PyObject_SetAttr(pyenum, pydname, val);
PyObject_SetAttr(val, PyStrings::gCppName, pydname);
@@ -206,6 +218,13 @@ CPyCppyy::CPPEnum* CPyCppyy::CPPEnum_New(const std::string& name, Cppyy::TCppSco
Py_DECREF(args);
Py_DECREF(pymeta);
+ if (!values_ok) {
+ if (!PyErr_Occurred())
+ PyErr_SetString(PyExc_ValueError, "could not set some of the enum values");
+ Py_DECREF(pyenum);
+ return nullptr;
+ }
+
} else {
// presumably not a class enum; simply pretend int
Py_INCREF(&PyInt_Type);
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx
index 40f4e0648af9a..144f0605d0c84 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/CPPScope.cxx
@@ -479,12 +479,14 @@ static PyObject* meta_getattro(PyObject* pyclass, PyObject* pyname)
// try all outstanding using namespaces in turn to find the attribute (will cache
// locally later; TODO: doing so may cause pathological cases)
for (auto pyref : *klass->fImp.fUsing) {
- PyObject* pyuscope = PyWeakref_GetObject(pyref);
+ PyObject* pyuscope = CPyCppyy_GetWeakRef(pyref);
if (pyuscope) {
attr = PyObject_GetAttr(pyuscope, pyname);
- if (attr) break;
- PyErr_Clear();
+ if (!attr) PyErr_Clear();
+ Py_DECREF(pyuscope);
}
+ if (attr)
+ break;
}
}
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h b/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h
index d0f3b0fc5621a..e3244bdcfe74b 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/CPyCppyy.h
@@ -351,8 +351,26 @@ inline PyObject* CPyCppyy_tp_call(PyObject* cb, PyObject* args, size_t, PyObject
}
#endif
+// weakref forced strong reference
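+// (PyWeakref_GetObject returns a borrowed reference and is deprecated as of
+// Python 3.13; PyWeakref_GetRef, added in 3.13, hands out a new strong
+// reference instead, hence the two implementations below.)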
+#if PY_VERSION_HEX < 0x30d0000
+static inline PyObject* CPyCppyy_GetWeakRef(PyObject* ref) {
+ PyObject* pyobject = PyWeakref_GetObject(ref);
+ if (!pyobject || pyobject == Py_None)
+ return nullptr;
+ Py_INCREF(pyobject);
+ return pyobject;
+}
+#else
+static inline PyObject* CPyCppyy_GetWeakRef(PyObject* ref) {
+ PyObject* pyobject = nullptr;
+ if (PyWeakref_GetRef(ref, &pyobject) != -1)
+ return pyobject;
+ return nullptr;
+}
+#endif
+
// Py_TYPE as inline function
-#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE)
+#if PY_VERSION_HEX < 0x03090000 && !defined(Py_SET_TYPE)
static inline
void _Py_SET_TYPE(PyObject *ob, PyTypeObject *type) { ob->ob_type = type; }
#define Py_SET_TYPE(ob, type) _Py_SET_TYPE((PyObject*)(ob), type)
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
index 0a8bce369233f..fafd1cc7a70de 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
@@ -203,6 +203,7 @@ static bool IsPyCArgObject(PyObject* pyobject)
return Py_TYPE(pyobject) == pycarg_type;
}
+#if PY_VERSION_HEX < 0x30d0000
static bool IsCTypesArrayOrPointer(PyObject* pyobject)
{
static PyTypeObject* cstgdict_type = nullptr;
@@ -219,6 +220,43 @@ static bool IsCTypesArrayOrPointer(PyObject* pyobject)
return true;
return false;
}
+#else
+// the internals of ctypes have been redone, requiring a more complex checking
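+// Note: this struct mirrors the leading fields of CPython's internal _ctypes
+// module state; only the type pointers actually used below are spelled out.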
+namespace {
+
+typedef struct {
+ PyTypeObject *DictRemover_Type;
+ PyTypeObject *PyCArg_Type;
+ PyTypeObject *PyCField_Type;
+ PyTypeObject *PyCThunk_Type;
+ PyTypeObject *StructParam_Type;
+ PyTypeObject *PyCType_Type;
+ PyTypeObject *PyCStructType_Type;
+ PyTypeObject *UnionType_Type;
+ PyTypeObject *PyCPointerType_Type;
+// ... unused fields omitted ...
+} _cppyy_ctypes_state;
+
+} // unnamed namespace
+
+static bool IsCTypesArrayOrPointer(PyObject* pyobject)
+{
+ static _cppyy_ctypes_state* state = nullptr;
+ if (!state) {
+ PyObject* ctmod = PyImport_AddModule("_ctypes"); // the extension module, not the Python one
+ if (ctmod)
+ state = (_cppyy_ctypes_state*)PyModule_GetState(ctmod);
+ }
+
+ // verify for object types that have a C payload
+ if (state && (PyObject_IsInstance((PyObject*)Py_TYPE(pyobject), (PyObject*)state->PyCType_Type) ||
+ PyObject_IsInstance((PyObject*)Py_TYPE(pyobject), (PyObject*)state->PyCPointerType_Type))) {
+ return true;
+ }
+
+ return false;
+}
+#endif
//- helper to establish life lines -------------------------------------------
@@ -3475,8 +3513,11 @@ static struct InitConvFactories_t {
gf["const signed char&"] = gf["const char&"];
#if __cplusplus > 201402L
gf["std::byte"] = gf["uint8_t"];
+ gf["byte"] = gf["uint8_t"];
gf["const std::byte&"] = gf["const uint8_t&"];
+ gf["const byte&"] = gf["const uint8_t&"];
gf["std::byte&"] = gf["uint8_t&"];
+ gf["byte&"] = gf["uint8_t&"];
#endif
gf["std::int8_t"] = gf["int8_t"];
gf["const std::int8_t&"] = gf["const int8_t&"];
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx
index 43b73fb8f107c..5affdd2120317 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/DispatchPtr.cxx
@@ -8,13 +8,18 @@
//-----------------------------------------------------------------------------
-PyObject* CPyCppyy::DispatchPtr::Get() const
+PyObject* CPyCppyy::DispatchPtr::Get(bool borrowed) const
{
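+ // With borrowed == false, the caller receives a new (strong) reference and
+ // is responsible for dropping it with Py_XDECREF.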
- if (fPyHardRef) return fPyHardRef;
+ if (fPyHardRef) {
+ if (!borrowed) Py_INCREF(fPyHardRef);
+ return fPyHardRef;
+ }
if (fPyWeakRef) {
- PyObject* disp = PyWeakref_GetObject(fPyWeakRef);
- if (disp != Py_None) // dispatcher object disappeared?
+ PyObject* disp = CPyCppyy_GetWeakRef(fPyWeakRef);
+ if (disp) { // dispatcher object disappeared?
+ if (borrowed) Py_DECREF(disp);
return disp;
+ }
}
return nullptr;
}
@@ -36,9 +41,10 @@ CPyCppyy::DispatchPtr::DispatchPtr(PyObject* pyobj, bool strong) : fPyHardRef(nu
//-----------------------------------------------------------------------------
CPyCppyy::DispatchPtr::DispatchPtr(const DispatchPtr& other, void* cppinst) : fPyWeakRef(nullptr)
{
- PyObject* pyobj = other.Get();
+ PyObject* pyobj = other.Get(false /* not borrowed */);
fPyHardRef = pyobj ? (PyObject*)((CPPInstance*)pyobj)->Copy(cppinst) : nullptr;
if (fPyHardRef) ((CPPInstance*)fPyHardRef)->SetDispatchPtr(this);
+ Py_XDECREF(pyobj);
}
//-----------------------------------------------------------------------------
@@ -48,9 +54,10 @@ CPyCppyy::DispatchPtr::~DispatchPtr() {
// is "notified" by nulling out the reference and an exception will be raised on
// continued access
if (fPyWeakRef) {
- PyObject* pyobj = PyWeakref_GetObject(fPyWeakRef);
- if (pyobj && pyobj != Py_None && ((CPPScope*)Py_TYPE(pyobj))->fFlags & CPPScope::kIsPython)
+ PyObject* pyobj = CPyCppyy_GetWeakRef(fPyWeakRef);
+ if (pyobj && ((CPPScope*)Py_TYPE(pyobj))->fFlags & CPPScope::kIsPython)
((CPPInstance*)pyobj)->GetObjectRaw() = nullptr;
+ Py_XDECREF(pyobj);
Py_DECREF(fPyWeakRef);
} else if (fPyHardRef) {
((CPPInstance*)fPyHardRef)->GetObjectRaw() = nullptr;
@@ -64,9 +71,10 @@ CPyCppyy::DispatchPtr& CPyCppyy::DispatchPtr::assign(const DispatchPtr& other, v
if (this != &other) {
Py_XDECREF(fPyWeakRef); fPyWeakRef = nullptr;
Py_XDECREF(fPyHardRef);
- PyObject* pyobj = other.Get();
+ PyObject* pyobj = other.Get(false /* not borrowed */);
fPyHardRef = pyobj ? (PyObject*)((CPPInstance*)pyobj)->Copy(cppinst) : nullptr;
if (fPyHardRef) ((CPPInstance*)fPyHardRef)->SetDispatchPtr(this);
+ Py_XDECREF(pyobj);
}
return *this;
}
@@ -86,9 +94,7 @@ void CPyCppyy::DispatchPtr::CppOwns()
{
// C++ maintains the hardref, keeping the PyObject alive w/o outstanding ref
if (fPyWeakRef) {
- fPyHardRef = PyWeakref_GetObject(fPyWeakRef);
- if (fPyHardRef == Py_None) fPyHardRef = nullptr;
- Py_XINCREF(fPyHardRef);
+ fPyHardRef = CPyCppyy_GetWeakRef(fPyWeakRef);
Py_DECREF(fPyWeakRef); fPyWeakRef = nullptr;
}
}
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
index cdef2b8c7b0de..06731d6d85d78 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
@@ -484,7 +484,12 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct,
// Python class to keep the inheritance tree intact)
for (const auto& name : protected_names) {
PyObject* disp_dct = PyObject_GetAttr(disp_proxy, PyStrings::gDict);
+#if PY_VERSION_HEX < 0x30d00f0
PyObject* pyf = PyMapping_GetItemString(disp_dct, (char*)name.c_str());
+#else
+ PyObject* pyf = nullptr;
+ PyMapping_GetOptionalItemString(disp_dct, (char*)name.c_str(), &pyf);
+#endif
if (pyf) {
PyObject_SetAttrString((PyObject*)klass, (char*)name.c_str(), pyf);
Py_DECREF(pyf);
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
index dd6b71d1d504e..16a1b31de7594 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
@@ -1036,6 +1036,8 @@ struct InitExecFactories_t {
#if __cplusplus > 201402L
gf["std::byte ptr"] = (ef_t)+[](cdims_t d) { return new ByteArrayExecutor{d}; };
gf["const std::byte ptr"] = gf["std::byte ptr"];
+ gf["byte ptr"] = gf["std::byte ptr"];
+ gf["const byte ptr"] = gf["std::byte ptr"];
#endif
gf["int8_t ptr"] = (ef_t)+[](cdims_t d) { return new Int8ArrayExecutor{d}; };
gf["uint8_t ptr"] = (ef_t)+[](cdims_t d) { return new UInt8ArrayExecutor{d}; };
@@ -1060,8 +1062,11 @@ struct InitExecFactories_t {
gf["internal_enum_type_t ptr"] = gf["int ptr"];
#if __cplusplus > 201402L
gf["std::byte"] = gf["uint8_t"];
+ gf["byte"] = gf["uint8_t"];
gf["std::byte&"] = gf["uint8_t&"];
+ gf["byte&"] = gf["uint8_t&"];
gf["const std::byte&"] = gf["const uint8_t&"];
+ gf["const byte&"] = gf["const uint8_t&"];
#endif
gf["std::int8_t"] = gf["int8_t"];
gf["std::int8_t&"] = gf["int8_t&"];
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx
index a58e7bed5c47b..0f31cb8bfa58a 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/LowLevelViews.cxx
@@ -817,38 +817,62 @@ static PyObject* ll_reshape(CPyCppyy::LowLevelView* self, PyObject* shape)
//---------------------------------------------------------------------------
-static PyObject* ll_array(CPyCppyy::LowLevelView* self, PyObject* args, PyObject* /* kwds */)
+static PyObject* ll_array(CPyCppyy::LowLevelView* self, PyObject* args, PyObject* kwds)
{
// Construct a numpy array from the lowlevelview (w/o copy if possible); this
// uses the Python methods to avoid depending on numpy directly
// Expect at most a dtype from the arguments;
- static PyObject* ctmod = PyImport_ImportModule("numpy"); // ref-count kept
- if (!ctmod)
+ static PyObject* npmod = PyImport_ImportModule("numpy"); // ref-count kept
+ if (!npmod)
return nullptr;
-// expect possible dtype from the arguments, otherwie take it from the type code
- PyObject* dtype;
- if (!args || PyTuple_GET_SIZE(args) != 1) {
- PyObject* npdtype = PyObject_GetAttr(ctmod, CPyCppyy::PyStrings::gDType);
- PyObject* typecode = ll_typecode(self, nullptr);
- dtype = PyObject_CallFunctionObjArgs(npdtype, typecode, nullptr);
- Py_DECREF(typecode);
- Py_DECREF(npdtype);
- } else {
- dtype = PyTuple_GET_ITEM(args, 0);
- Py_INCREF(dtype);
+ bool docopy = false;
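+ // NumPy 2 may call __array__ with a "copy" keyword; honor it by returning
+ // a fresh copy instead of a view when requested.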
+ if (kwds) {
+ PyObject* pycp = PyObject_GetItem(kwds, CPyCppyy::PyStrings::gCopy);
+ if (!pycp) {
+ PyErr_SetString(PyExc_TypeError, "__array__ only supports the \"copy\" keyword");
+ return nullptr;
+ }
+
+ docopy = PyObject_IsTrue(pycp);
+ Py_DECREF(pycp);
}
- if (!dtype)
- return nullptr;
+ if (!docopy) { // view requested
+ // expect possible dtype from the arguments, otherwise take it from the type code
+ PyObject* dtype;
+ if (!args || PyTuple_GET_SIZE(args) != 1) {
+ PyObject* npdtype = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gDType);
+ PyObject* typecode = ll_typecode(self, nullptr);
+ dtype = PyObject_CallFunctionObjArgs(npdtype, typecode, nullptr);
+ Py_DECREF(typecode);
+ Py_DECREF(npdtype);
+ } else {
+ dtype = PyTuple_GET_ITEM(args, 0);
+ Py_INCREF(dtype);
+ }
- PyObject* npfrombuf = PyObject_GetAttr(ctmod, CPyCppyy::PyStrings::gFromBuffer);
- PyObject* view = PyObject_CallFunctionObjArgs(npfrombuf, (PyObject*)self, dtype, nullptr);
- Py_DECREF(dtype);
- Py_DECREF(npfrombuf);
+ if (!dtype)
+ return nullptr;
+
+ PyObject* npfrombuf = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gFromBuffer);
+ PyObject* view = PyObject_CallFunctionObjArgs(npfrombuf, (PyObject*)self, dtype, nullptr);
+ Py_DECREF(dtype);
+ Py_DECREF(npfrombuf);
+
+ return view;
+
+ } else { // copy requested
+ PyObject* npcopy = PyObject_GetAttr(npmod, CPyCppyy::PyStrings::gCopy);
+ PyObject* newarr = PyObject_CallFunctionObjArgs(npcopy, (PyObject*)self, nullptr);
+ Py_DECREF(npcopy);
- return view;
+ return newarr;
+ }
+
+// never get here
+ return nullptr;
}
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx
index 801337717e081..0c1c0e002ecfd 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/ProxyWrappers.cxx
@@ -499,11 +499,9 @@ PyObject* CPyCppyy::GetScopeProxy(Cppyy::TCppScope_t scope)
// Retrieve scope proxy from the known ones.
PyClassMap_t::iterator pci = gPyClasses.find(scope);
if (pci != gPyClasses.end()) {
- PyObject* pyclass = PyWeakref_GetObject(pci->second);
- if (pyclass != Py_None) {
- Py_INCREF(pyclass);
+ PyObject* pyclass = CPyCppyy_GetWeakRef(pci->second);
+ if (pyclass)
return pyclass;
- }
}
return nullptr;
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx
index e918d44fc0d54..abbf16ece0049 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.cxx
@@ -7,6 +7,7 @@
PyObject* CPyCppyy::PyStrings::gAssign = nullptr;
PyObject* CPyCppyy::PyStrings::gBases = nullptr;
PyObject* CPyCppyy::PyStrings::gBase = nullptr;
+PyObject* CPyCppyy::PyStrings::gCopy = nullptr;
PyObject* CPyCppyy::PyStrings::gCppBool = nullptr;
PyObject* CPyCppyy::PyStrings::gCppName = nullptr;
PyObject* CPyCppyy::PyStrings::gAnnotations = nullptr;
@@ -87,6 +88,7 @@ bool CPyCppyy::CreatePyStrings() {
CPPYY_INITIALIZE_STRING(gAssign, __assign__);
CPPYY_INITIALIZE_STRING(gBases, __bases__);
CPPYY_INITIALIZE_STRING(gBase, __base__);
+ CPPYY_INITIALIZE_STRING(gCopy, copy);
#if PY_VERSION_HEX < 0x03000000
CPPYY_INITIALIZE_STRING(gCppBool, __cpp_nonzero__);
#else
@@ -169,6 +171,7 @@ PyObject* CPyCppyy::DestroyPyStrings() {
// Remove all cached python strings.
Py_DECREF(PyStrings::gBases); PyStrings::gBases = nullptr;
Py_DECREF(PyStrings::gBase); PyStrings::gBase = nullptr;
+ Py_DECREF(PyStrings::gCopy); PyStrings::gCopy = nullptr;
Py_DECREF(PyStrings::gCppBool); PyStrings::gCppBool = nullptr;
Py_DECREF(PyStrings::gCppName); PyStrings::gCppName = nullptr;
Py_DECREF(PyStrings::gAnnotations); PyStrings::gAnnotations = nullptr;
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h
index 7012b89ce1620..55eaef58273a3 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/PyStrings.h
@@ -10,6 +10,7 @@ namespace PyStrings {
extern PyObject* gAssign;
extern PyObject* gBases;
extern PyObject* gBase;
+ extern PyObject* gCopy;
extern PyObject* gCppBool;
extern PyObject* gCppName;
extern PyObject* gAnnotations;
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
index 2196b94ff33f4..8559b2ebfe7ff 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
@@ -528,12 +528,14 @@ PyObject* VectorData(PyObject* self, PyObject*)
//---------------------------------------------------------------------------
-PyObject* VectorArray(PyObject* self, PyObject* /* args */)
+PyObject* VectorArray(PyObject* self, PyObject* args, PyObject* kwargs)
{
PyObject* pydata = VectorData(self, nullptr);
- PyObject* view = PyObject_CallMethodNoArgs(pydata, PyStrings::gArray);
+ PyObject* arrcall = PyObject_GetAttr(pydata, PyStrings::gArray);
+ PyObject* newarr = PyObject_Call(arrcall, args, kwargs);
+ Py_DECREF(arrcall);
Py_DECREF(pydata);
- return view;
+ return newarr;
}
@@ -1809,7 +1811,7 @@ bool CPyCppyy::Pythonize(PyObject* pyclass, const std::string& name)
Utility::AddToClass(pyclass, "data", (PyCFunction)VectorData);
// numpy array conversion
- Utility::AddToClass(pyclass, "__array__", (PyCFunction)VectorArray);
+ Utility::AddToClass(pyclass, "__array__", (PyCFunction)VectorArray, METH_VARARGS | METH_KEYWORDS /* unused */);
// checked getitem
if (HasAttrDirect(pyclass, PyStrings::gLen)) {
diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx
index 03eda4eaaedc5..e4cf418b77c02 100644
--- a/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx
+++ b/bindings/pyroot/cppyy/CPyCppyy/src/Utility.cxx
@@ -640,7 +640,7 @@ void CPyCppyy::Utility::ConstructCallbackPreamble(const std::string& retType,
<< retType << "\"), CPyCppyy::DestroyConverter};\n";
std::vector arg_is_ptr;
if (nArgs) {
- arg_is_ptr.reserve(nArgs);
+ arg_is_ptr.resize(nArgs);
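+ // resize (not reserve): the flags are written by index below, so the
+ // elements must exist before they are assigned.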
code << " CPYCPPYY_STATIC std::vector>> argcvs;\n"
<< " if (argcvs.empty()) {\n"
<< " argcvs.reserve(" << nArgs << ");\n";
diff --git a/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx b/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx
index 0effab90721ec..72322aac644c2 100644
--- a/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx
+++ b/bindings/pyroot/cppyy/cppyy-backend/clingwrapper/src/clingwrapper.cxx
@@ -257,7 +257,8 @@ class ApplicationStarter {
"slice_array", "slice", "stack", "string", "strstream", "strstreambuf",
"time_get_byname", "time_get", "time_put_byname", "time_put", "unary_function",
"unary_negate", "unique_ptr", "underflow_error", "unordered_map", "unordered_multimap",
- "unordered_multiset", "unordered_set", "valarray", "vector", "weak_ptr", "wstring"};
+ "unordered_multiset", "unordered_set", "valarray", "vector", "weak_ptr", "wstring",
+ "__hash_not_enabled"};
for (auto& name : stl_names)
gSTLNames.insert(name);
diff --git a/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst b/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst
index 60295fb89f3ed..033f819e17f4f 100644
--- a/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst
+++ b/bindings/pyroot/cppyy/cppyy/doc/source/changelog.rst
@@ -18,9 +18,15 @@ master
* Improved overload selection for classes with deep hierarchies
* Fixed regression when calling static methods with default args on instances
* Fixed regression for pickling enums (in global scope only)
+* Proper error handling on ``memoryview(array.array('B', []))``
* Auto-cast elements of std::vector, with T a class type
* Add a ``Sequence_Check()`` method to the public API
* Fix offset calculation of ``std::vector`` datamember on Mac arm
+* Extend API to define executor and converter aliases
+* Use importlib.metadata instead of pkg_resources for py3.11 and later
+* Added out-of-bounds handling for small char-based enums
+* Fixes for py3.12 and py3.13
+* Upgrade backend to Clang16
2023-11-15: 3.1.2
diff --git a/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py b/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py
index 957443289d7c0..a7456a0c8b124 100644
--- a/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py
+++ b/bindings/pyroot/cppyy/cppyy/python/cppyy/__init__.py
@@ -305,18 +305,27 @@ def add_library_path(path):
if apipath_extra is None:
try:
- import pkg_resources as pr
+ if 0x30a0000 <= sys.hexversion:
+ import importlib.metadata as m
- d = pr.get_distribution('CPyCppyy')
- for line in d.get_metadata_lines('RECORD'):
- if 'API.h' in line:
- part = line[0:line.find(',')]
+ for p in m.files('CPyCppyy'):
+ if p.match('API.h'):
+ ape = p.locate()
+ break
+ del p, m
+ else:
+ import pkg_resources as pr
+
+ d = pr.get_distribution('CPyCppyy')
+ for line in d.get_metadata_lines('RECORD'):
+ if 'API.h' in line:
+ ape = os.path.join(d.location, line[0:line.find(',')])
+ break
+ del line, d, pr
- ape = os.path.join(d.location, part)
if os.path.exists(ape):
apipath_extra = os.path.dirname(os.path.dirname(ape))
-
- del part, d, pr
+ del ape
except Exception:
pass
diff --git a/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py b/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py
index 90c3687b41696..0004c87803b72 100644
--- a/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py
+++ b/bindings/pyroot/cppyy/cppyy/python/cppyy/_stdcpp_fix.py
@@ -1,6 +1,6 @@
import sys
-# It may be that the interpreter (wether python or pypy-c) was not linked
+# It may be that the interpreter (whether python or pypy-c) was not linked
# with C++; force its loading before doing anything else (note that not
# linking with C++ spells trouble anyway for any C++ libraries ...)
if 'linux' in sys.platform and 'GCC' in sys.version:
diff --git a/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx b/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx
index 3daa11c14f7d1..23f4c041f69c2 100644
--- a/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx
+++ b/bindings/pyroot/cppyy/cppyy/test/advancedcpp.cxx
@@ -73,12 +73,12 @@ double pass_double_through_const_ref(const double& d) { return d; }
// for math conversions testing
-bool operator==(const some_comparable& c1, const some_comparable& c2 )
+bool operator==(const some_comparable& c1, const some_comparable& c2)
{
return &c1 != &c2; // the opposite of a pointer comparison
}
-bool operator!=( const some_comparable& c1, const some_comparable& c2 )
+bool operator!=(const some_comparable& c1, const some_comparable& c2)
{
return &c1 == &c2; // the opposite of a pointer comparison
}
diff --git a/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py b/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py
index 710ff5eb1e982..26e2afc21b482 100644
--- a/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py
+++ b/bindings/pyroot/cppyy/cppyy/test/test_datatypes.py
@@ -1038,10 +1038,10 @@ def test20_object_comparisons_with_cpp__eq__(self):
struct Comparable1 {
Comparable1(int i) : fInt(i) {}
int fInt;
- static bool __eq__(const Comparable1& self, const Comparable1& other){
+ static bool __eq__(const Comparable1& self, const Comparable1& other) {
return self.fInt == other.fInt;
}
- static bool __ne__(const Comparable1& self, const Comparable1& other){
+ static bool __ne__(const Comparable1& self, const Comparable1& other) {
return self.fInt != other.fInt;
}
};
@@ -1049,10 +1049,10 @@ def test20_object_comparisons_with_cpp__eq__(self):
struct Comparable2 {
Comparable2(int i) : fInt(i) {}
int fInt;
- bool __eq__(const Comparable2& other){
+ bool __eq__(const Comparable2& other) {
return fInt == other.fInt;
}
- bool __ne__(const Comparable2& other){
+ bool __ne__(const Comparable2& other) {
return fInt != other.fInt;
}
}; }""")
diff --git a/bindings/pyroot/cppyy/cppyy/test/test_fragile.py b/bindings/pyroot/cppyy/cppyy/test/test_fragile.py
index 75fce0b1c8dc3..fbacb7bb220fc 100644
--- a/bindings/pyroot/cppyy/cppyy/test/test_fragile.py
+++ b/bindings/pyroot/cppyy/cppyy/test/test_fragile.py
@@ -528,7 +528,7 @@ def get_errmsg(exc, allspace=allspace):
err = get_errmsg(cppdef_exc)
assert "FailedtoparsethegivenC++code" in err
assert "error:" in err
- assert "expectedunqualified-id" in err
+ assert "invaliddigit" in err
assert "1aap=42;" in err
def test22_cppexec(self):
diff --git a/bindings/pyroot/cppyy/cppyy/test/test_regression.py b/bindings/pyroot/cppyy/cppyy/test/test_regression.py
index cf0df68ceb899..6bd0baf387ba4 100644
--- a/bindings/pyroot/cppyy/cppyy/test/test_regression.py
+++ b/bindings/pyroot/cppyy/cppyy/test/test_regression.py
@@ -9,8 +9,12 @@ class TestREGRESSION:
def setup_class(cls):
import cppyy
- def stringpager(text, cls=cls):
- cls.helpout.append(text)
+ if sys.hexversion < 0x30d0000:
+ def stringpager(text, cls=cls):
+ cls.helpout.append(text)
+ else:
+ def stringpager(text, title='', cls=cls):
+ cls.helpout.append(text)
import pydoc
pydoc.pager = stringpager
diff --git a/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py b/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py
index 771cc941bc51e..b42fa0b08b16a 100644
--- a/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py
+++ b/bindings/pyroot/cppyy/cppyy/test/test_stltypes.py
@@ -1704,9 +1704,9 @@ def test01_deque_byvalue_regression(self):
"""Return by value of a deque used to crash"""
import cppyy
- assert cppyy.cppdef("""std::deque f() {
+ assert cppyy.cppdef("""std::deque emptyf() {
std::deque d; d.push_back(0); return d ; }""")
- x = cppyy.gbl.f()
+ x = cppyy.gbl.emptyf()
assert x
del x
diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch
index 975acf0d3b5f2..df089f1be5bb6 100644
--- a/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch
+++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch
@@ -1,7 +1,7 @@
From 24b94cde0a5fa6b46be05359b7218af9bb295d87 Mon Sep 17 00:00:00 2001
From: Jonas Rembser
Date: Tue, 12 Mar 2024 01:59:37 +0100
-Subject: [PATCH] [CPyCppyy] Adapt to no `std::` in ROOT
+Subject: [PATCH 1/2] [CPyCppyy] Adapt to no `std::` in ROOT
---
.../pyroot/cppyy/CPyCppyy/src/Converters.cxx | 20 +++++++++++--------
@@ -127,3 +127,58 @@ index c1720cf3f2..ae0e31cac8 100644
--
2.44.0
+From ef0836c23c850ce3113d5a7ff5787dee9e094099 Mon Sep 17 00:00:00 2001
+From: Aaron Jomy
+Date: Tue, 21 Jan 2025 14:09:03 +0100
+Subject: [PATCH 2/2] [PyROOT] Add executors and converters for `std::byte`
+
+Fixes issue: https://github.com/root-project/root/issues/17442
+---
+ bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx | 3 +++
+ bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx | 5 +++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
+index c127604a6e..21d3d4aa73 100644
+--- a/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Converters.cxx
+@@ -3522,8 +3522,11 @@ public:
+ gf["const signed char&"] = gf["const char&"];
+ #if __cplusplus > 201402L
+ gf["std::byte"] = gf["uint8_t"];
++ gf["byte"] = gf["uint8_t"];
+ gf["const std::byte&"] = gf["const uint8_t&"];
++ gf["const byte&"] = gf["const uint8_t&"];
+ gf["std::byte&"] = gf["uint8_t&"];
++ gf["byte&"] = gf["uint8_t&"];
+ #endif
+ gf["std::int8_t"] = gf["int8_t"];
+ gf["const std::int8_t&"] = gf["const int8_t&"];
+diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
+index 5e94846771..edefcf5b5b 100644
+--- a/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Executors.cxx
+@@ -1022,6 +1022,8 @@ public:
+ #if __cplusplus > 201402L
+ gf["std::byte ptr"] = (ef_t)+[](cdims_t d) { return new ByteArrayExecutor{d}; };
+ gf["const std::byte ptr"] = gf["std::byte ptr"];
++ gf["byte ptr"] = gf["std::byte ptr"];
++ gf["const byte ptr"] = gf["std::byte ptr"];
+ #endif
+ gf["int8_t ptr"] = (ef_t)+[](cdims_t d) { return new Int8ArrayExecutor{d}; };
+ gf["uint8_t ptr"] = (ef_t)+[](cdims_t d) { return new UInt8ArrayExecutor{d}; };
+@@ -1046,8 +1048,11 @@ public:
+ gf["internal_enum_type_t ptr"] = gf["int ptr"];
+ #if __cplusplus > 201402L
+ gf["std::byte"] = gf["uint8_t"];
++ gf["byte"] = gf["uint8_t"];
+ gf["std::byte&"] = gf["uint8_t&"];
++ gf["byte&"] = gf["uint8_t&"];
+ gf["const std::byte&"] = gf["const uint8_t&"];
++ gf["const byte&"] = gf["const uint8_t&"];
+ #endif
+ gf["std::int8_t"] = gf["int8_t"];
+ gf["std::int8_t&"] = gf["int8_t&"];
+--
+2.43.0
+
diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch
index 08a8ac48ea532..2421a72cdf11b 100644
--- a/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch
+++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Always-convert-returned-std-string.patch
@@ -89,8 +89,8 @@ index 3ab4c8b3a1..ae0e31cac8 100644
}
+#endif
- // This pythonization is disabled for ROOT because it is a bit buggy
- #if 0
+ if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0) {
+ // create a pseudo-constructor to allow initializer-style object creation
--
2.44.0
diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch
new file mode 100644
index 0000000000000..e373f6b9afa76
--- /dev/null
+++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Don-t-attempt-to-expose-protected-data-members.patch
@@ -0,0 +1,77 @@
+From 8f54f8c5434ff593b5a3acc3f97e4cd5f0310fdd Mon Sep 17 00:00:00 2001
+From: Jonas Rembser
+Date: Thu, 7 Nov 2024 10:19:04 +0100
+Subject: [PATCH] [CPyCppyy] Don't attempt to expose protected data members in
+ dispatcher
+
+This mechanism crashes in Python 3.13, and it also didn't work before
+with previous Python 3 versions:
+
+```python
+import cppyy
+
+cppyy.cppdef("""
+
+class MyBaseClass {
+public:
+ virtual ~MyBaseClass() = default;
+protected:
+ int protectedFunc() { return 5; }
+ int _protectedData = 4;
+};
+
+""")
+
+class MyDerivedClass(cppyy.gbl.MyBaseClass):
+ pass
+
+my_obj = MyDerivedClass()
+
+print(my_obj.protectedFunc()) # works!
+print(my_obj._protectedData) # doesn't work!
+```
+
+Here is the output with Python 3.8 on lxplus for example:
+
+```txt
+5
+Traceback (most recent call last):
+ File "/afs/cern.ch/user/r/rembserj/repro.py", line 21, in
+ print(my_obj._protectedData) # doesn't work!
+AttributeError: 'MyDerivedClass' object has no attribute '_protectedData'
+```
+
+It actually worked in the past before the cppyy upgrade in ROOT 6.32.
+
+Therefore, there is still a regression that should be fixed.
+
+However, commenting out the code that now doesn't work anyway still
+helps to avoid the crashes in Python 3.13, so this commit suggests to do
+this.
+---
+ bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
+index cdef2b8c7b..0fd1705966 100644
+--- a/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Dispatcher.cxx
+@@ -407,6 +407,7 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct,
+
+ // destructor: default is fine
+
++#if 0 // doesn't work
+ // pull in data members that are protected
+ bool setPublic = false;
+ for (const auto& binfo : base_infos) {
+@@ -426,6 +427,7 @@ bool CPyCppyy::InsertDispatcher(CPPScope* klass, PyObject* bases, PyObject* dct,
+ }
+ }
+ }
++#endif
+
+ // initialize the dispatch pointer for all direct bases that have one
+ BaseInfos_t::size_type disp_inited = 0;
+--
+2.47.0
+
diff --git a/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch b/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch
new file mode 100644
index 0000000000000..eb990aae4ffac
--- /dev/null
+++ b/bindings/pyroot/cppyy/patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch
@@ -0,0 +1,27 @@
+From 3b62eaa9ec2dfabccca52910d8239af7d9e56c9a Mon Sep 17 00:00:00 2001
+From: maximusron
+Date: Sun, 29 Sep 2024 09:32:17 +0200
+Subject: [PATCH] [PyROOT] Prevent construction of aggregate initializer for
+ std::tuple
+
+---
+ bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
+index b5d5290e46..2196b94ff3 100644
+--- a/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
++++ b/bindings/pyroot/cppyy/CPyCppyy/src/Pythonize.cxx
+@@ -1720,7 +1720,8 @@ bool CPyCppyy::Pythonize(PyObject* pyclass, const std::string& name)
+ }
+ #endif
+
+- if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0) {
++ if (Cppyy::IsAggregate(((CPPClass*)pyclass)->fCppType) && name.compare(0, 5, "std::", 5) != 0 &&
++ name.compare(0, 6, "tuple<", 6) != 0) {
+ // create a pseudo-constructor to allow initializer-style object creation
+ Cppyy::TCppType_t kls = ((CPPClass*)pyclass)->fCppType;
+ Cppyy::TCppIndex_t ndata = Cppyy::GetNumDatamembers(kls);
+--
+2.47.0
+
diff --git a/bindings/pyroot/cppyy/sync-upstream b/bindings/pyroot/cppyy/sync-upstream
index 2139890c32418..c5d5c15e36851 100755
--- a/bindings/pyroot/cppyy/sync-upstream
+++ b/bindings/pyroot/cppyy/sync-upstream
@@ -45,6 +45,7 @@ git apply patches/CPyCppyy-Adapt-to-no-std-in-ROOT.patch
git apply patches/CPyCppyy-Always-convert-returned-std-string.patch
git apply patches/CPyCppyy-Disable-implicit-conversion-to-smart-ptr.patch
git apply patches/CPyCppyy-TString_converter.patch
+git apply patches/CPyCppyy-Prevent-construction-of-agg-init-for-tuple.patch
git apply patches/cppyy-No-CppyyLegacy-namespace.patch
git apply patches/cppyy-Remove-Windows-workaround.patch
git apply patches/cppyy-Don-t-enable-cling-autoloading.patch
diff --git a/bindings/pyroot/pythonizations/CMakeLists.txt b/bindings/pyroot/pythonizations/CMakeLists.txt
index fd19abecfaec6..20f2076e90238 100644
--- a/bindings/pyroot/pythonizations/CMakeLists.txt
+++ b/bindings/pyroot/pythonizations/CMakeLists.txt
@@ -80,31 +80,44 @@ set(py_sources
ROOT/_facade.py
ROOT/__init__.py
ROOT/_numbadeclare.py
+ ROOT/_pythonization/__init__.py
ROOT/_pythonization/_cppinstance.py
ROOT/_pythonization/_drawables.py
ROOT/_pythonization/_generic.py
- ROOT/_pythonization/__init__.py
+ ROOT/_pythonization/_memory_utils.py
ROOT/_pythonization/_pyz_utils.py
- ROOT/_pythonization/_rvec.py
ROOT/_pythonization/_runtime_error.py
+ ROOT/_pythonization/_rvec.py
ROOT/_pythonization/_stl_vector.py
ROOT/_pythonization/_tarray.py
ROOT/_pythonization/_tclass.py
ROOT/_pythonization/_tclonesarray.py
ROOT/_pythonization/_tcollection.py
+ ROOT/_pythonization/_tcolor.py
ROOT/_pythonization/_tcomplex.py
ROOT/_pythonization/_tcontext.py
- ROOT/_pythonization/_tdirectoryfile.py
ROOT/_pythonization/_tdirectory.py
- ROOT/_pythonization/_tfile.py
+ ROOT/_pythonization/_tdirectoryfile.py
+ ROOT/_pythonization/_tefficiency.py
+ ROOT/_pythonization/_tentrylist.py
+ ROOT/_pythonization/_teventlist.py
ROOT/_pythonization/_tf1.py
+ ROOT/_pythonization/_tf2.py
+ ROOT/_pythonization/_tf3.py
+ ROOT/_pythonization/_tfile.py
+ ROOT/_pythonization/_tfilemerger.py
+ ROOT/_pythonization/_tformula.py
ROOT/_pythonization/_tgraph.py
+ ROOT/_pythonization/_tgraph2d.py
ROOT/_pythonization/_th1.py
+ ROOT/_pythonization/_th2.py
+ ROOT/_pythonization/_th3.py
ROOT/_pythonization/_titer.py
ROOT/_pythonization/_tobject.py
ROOT/_pythonization/_tobjstring.py
ROOT/_pythonization/_tseqcollection.py
ROOT/_pythonization/_tstring.py
+ ROOT/_pythonization/_tstyle.py
ROOT/_pythonization/_ttree.py
ROOT/_pythonization/_tvector3.py
ROOT/_pythonization/_tvectort.py
@@ -113,11 +126,9 @@ set(py_sources
set(cpp_sources
src/PyROOTModule.cxx
- src/PyROOTWrapper.cxx
src/RPyROOTApplication.cxx
src/GenericPyz.cxx
src/TClassPyz.cxx
- src/TMemoryRegulator.cxx
src/TObjectPyz.cxx
src/TTreePyz.cxx
src/CPPInstancePyz.cxx
@@ -182,9 +193,9 @@ endif()
# Compile .py files
foreach(py_source ${py_sources})
add_custom_command(TARGET ${libname}
+ POST_BUILD
COMMAND ${Python3_EXECUTABLE} -m py_compile ${localruntimedir}/${py_source}
COMMAND ${Python3_EXECUTABLE} -O -m py_compile ${localruntimedir}/${py_source}
- DEPENDS ${localruntimedir}/${py_source}
COMMENT "Compiling PyROOT source ${py_source} for Python ${Python3_VERSION}")
endforeach()
diff --git a/bindings/pyroot/pythonizations/python/ROOT/__init__.py b/bindings/pyroot/pythonizations/python/ROOT/__init__.py
index 211e2001dfdd7..b8b0e04dfa7ad 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/__init__.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/__init__.py
@@ -184,20 +184,4 @@ def cleanup():
facade.__dict__["app"].keep_polling = False
facade.__dict__["app"].process_root_events.join()
- if "libROOTPythonizations" in sys.modules:
- backend = sys.modules["libROOTPythonizations"]
-
- # Make sure all the objects regulated by PyROOT are deleted and their
- # Python proxies are properly nonified.
- backend.ClearProxiedObjects()
-
- from ROOT import PyConfig
-
- if PyConfig.ShutDown:
- # Hard teardown: run part of the gROOT shutdown sequence.
- # Running it here ensures that it is done before any ROOT libraries
- # are off-loaded, with unspecified order of static object destruction.
- backend.gROOT.EndOfProcessCleanups()
-
-
atexit.register(cleanup)
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_facade.py b/bindings/pyroot/pythonizations/python/ROOT/_facade.py
index 8bb5ff2aa92d9..69a936e271808 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_facade.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_facade.py
@@ -8,8 +8,6 @@
import cppyy.ll
-from libROOTPythonizations import gROOT
-
from ._application import PyROOTApplication
from ._numbadeclare import _NumbaDeclareDecorator
@@ -36,7 +34,7 @@ class _gROOTWrapper(object):
def __init__(self, facade):
self.__dict__["_facade"] = facade
- self.__dict__["_gROOT"] = gROOT
+ self.__dict__["_gROOT"] = cppyy.gbl.ROOT.GetROOT()
def __getattr__(self, name):
if name != "SetBatch" and self._facade.__dict__["gROOT"] != self._gROOT:
@@ -158,7 +156,7 @@ def _fallback_getattr(self, name):
elif hasattr(cppyy.gbl.ROOT, name):
return getattr(cppyy.gbl.ROOT, name)
else:
- res = gROOT.FindObject(name)
+ res = self.gROOT.FindObject(name)
if res:
return res
raise AttributeError("Failed to get attribute {} from ROOT".format(name))
@@ -204,7 +202,10 @@ def _register_converters_and_executors(self):
def _finalSetup(self):
# Prevent this method from being re-entered through the gROOT wrapper
- self.__dict__["gROOT"] = gROOT
+ self.__dict__["gROOT"] = cppyy.gbl.ROOT.GetROOT()
+
+ # Make sure the interpreter is initialized once gROOT has been initialized
+ cppyy.gbl.TInterpreter.Instance()
# Setup interactive usage from Python
self.__dict__["app"] = PyROOTApplication(self.PyConfig, self._is_ipython)
@@ -387,7 +388,7 @@ def TMVA(self):
from ._pythonization import _tmva
ns = self._fallback_getattr("TMVA")
- hasRDF = "dataframe" in gROOT.GetConfigFeatures()
+ hasRDF = "dataframe" in self.gROOT.GetConfigFeatures()
if hasRDF:
try:
from ._pythonization._tmva import inject_rbatchgenerator, _AsRTensor, SaveXGBoost
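
With the import of `gROOT` from libROOTPythonizations gone, the facade now resolves gROOT lazily through cppyy. A minimal sketch of the equivalence, assuming ROOT's bundled cppyy:

```python
import cppyy

# ROOT::GetROOT() returns the same TROOT singleton that ROOT.gROOT wraps
groot = cppyy.gbl.ROOT.GetROOT()
print(groot.GetVersion())
```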
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py
new file mode 100644
index 0000000000000..350012b2e687d
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_memory_utils.py
@@ -0,0 +1,65 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+
+def _should_give_up_ownership(object):
+ """
+ Ownership of objects which automatically register to a directory should be
+ left to C++, unless that directory is gROOT.
+ """
+ import ROOT
+
+ tdir = object.GetDirectory()
+ return bool(tdir) and tdir is not ROOT.gROOT
+
+
+def _constructor_releasing_ownership(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and give up ownership if the
+ object is attached to a directory, which is then the owner. The only
+ exception is when the owner is gROOT, to avoid introducing a
+ backwards-incompatible change.
+ """
+ import ROOT
+
+ self._cpp_constructor(*args, **kwargs)
+ if _should_give_up_ownership(self):
+ ROOT.SetOwnership(self, False)
+
+
+def _Clone_releasing_ownership(self, *args, **kwargs):
+ """
+ Analogous to _constructor_releasing_ownership, but for the TObject::Clone()
+ implementation.
+ """
+ import ROOT
+
+ out = self._Original_Clone(*args, **kwargs)
+ if _should_give_up_ownership(out):
+ ROOT.SetOwnership(out, False)
+ return out
+
+
+def inject_constructor_releasing_ownership(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
+
+def inject_clone_releasing_ownership(klass):
+ klass._Original_Clone = klass.Clone
+ klass.Clone = _Clone_releasing_ownership
+
+
+def _SetDirectory_SetOwnership(self, dir):
+ self._Original_SetDirectory(dir)
+ if dir:
+ # If we are actually registering with a directory, give ownership to C++
+ import ROOT
+
+ ROOT.SetOwnership(self, False)
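
A hedged illustration of the rule these helpers implement once they are wired into the histogram and tree pythonizations below (file name hypothetical): an object that auto-registers with a TFile is owned and deleted by that file, so Python must not delete it again.

```python
import ROOT

f = ROOT.TFile("example.root", "RECREATE")  # hypothetical file name
h = ROOT.TH1D("h", "h", 10, 0.0, 1.0)       # auto-registers with f

# h.GetDirectory() is f, not gROOT, so the pythonized constructor already
# called ROOT.SetOwnership(h, False): f owns h and deletes it on Close().
f.Close()
```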
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py
index cc0b8c7ec98b7..b3b277b14657a 100755
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_rdf_pyz.py
@@ -332,31 +332,9 @@ def x_more_than_y(x):
rdf_node = _handle_cpp_callables(func, rdf._OriginalFilter, func, *_convert_to_vector(args))
if rdf_node is not None:
return rdf_node
-
- jitter = FunctionJitter(rdf)
- func.__annotations__['return'] = 'bool' # return type for Filters is bool # Note: You can keep double and Filter still works.
-
- col_list = []
- filter_name = ""
-
- if len(args) == 1:
- if isinstance(args[0], list):
- col_list = args[0]
- elif isinstance(args[0], str):
- filter_name = args[0]
- else:
- raise ValueError(f"Argument should be either 'list' or 'str', not {type(args[0]).__name__}.")
-
- elif len(args) == 2:
- if isinstance(args[0], list) and isinstance(args[1], str):
- col_list = args[0]
- filter_name = args[1]
- else:
- raise ValueError(f"Arguments should be ('list', 'str',) not ({type(args[0]).__name__,type(args[1]).__name__}.")
-
-
- func_call = jitter.jit_function(func, col_list, extra_args)
- return rdf._OriginalFilter("Numba::" + func_call, filter_name)
+ else:
+ raise NotImplementedError(
+ f"Passing callables of type {type(func)} will be supported in future versions of ROOT.")
def _PyDefine(rdf, col_name, callable_or_str, cols = [] , extra_args = {} ):
"""
@@ -403,7 +381,6 @@ def x_scaled(x):
rdf_node = _handle_cpp_callables(func, rdf._OriginalDefine, col_name, func, cols)
if rdf_node is not None:
return rdf_node
-
- jitter = FunctionJitter(rdf)
- func_call = jitter.jit_function(func, cols, extra_args)
- return rdf._OriginalDefine(col_name, "Numba::" + func_call)
+ else:
+ raise NotImplementedError(
+ f"Passing callables of type {type(func)} will be supported in future versions of ROOT.")
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py
index 616d0c3984313..ffc2ddc05a1c0 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_roofit/_rooworkspace.py
@@ -110,6 +110,13 @@ def __setattr__(self, name, value):
raise AttributeError('Resetting the "' + name + '" attribute of a RooWorkspace is not allowed!')
object.__setattr__(self, name, value)
+ def _ipython_key_completions_(self):
+ r"""
+ Support tab completion for `__getitem__`, suggesting all components in
+ the workspace.
+ """
+ return [c.GetName() for c in self.components()]
+
def RooWorkspace_import(self, *args, **kwargs):
r"""The RooWorkspace::import function can't be used in PyROOT because `import` is a reserved python keyword.
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py
new file mode 100644
index 0000000000000..a4ddfd2411b8c
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tcolor.py
@@ -0,0 +1,25 @@
+# Author: Vincenzo Eduardo Padulano CERN 11/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+from . import pythonization
+
+def _TColor_constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization("TColor")
+def pythonize_tcolor(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TColor_constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py
index 01ca47eba1251..3331ec5aad9ba 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tdirectory.py
@@ -123,12 +123,21 @@ def _TDirectory_WriteObject(self, obj, *args):
return self.WriteObjectAny(obj, type(obj).__cpp_name__, *args)
+def _ipython_key_completions_(self):
+ r"""
+ Support tab completion for `__getitem__`, suggesting the names of all
+ objects in the file.
+ """
+ return [k.GetName() for k in self.GetListOfKeys()]
+
+
def pythonize_tdirectory():
klass = cppyy.gbl.TDirectory
klass.__getitem__ = _TDirectory_getitem
klass.__getattr__ = _TDirectory_getattr
klass._WriteObject = klass.WriteObject
klass.WriteObject = _TDirectory_WriteObject
+ klass._ipython_key_completions_ = _ipython_key_completions_
# Instant pythonization (executed at `import ROOT` time), no need of a
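
`_ipython_key_completions_` is the standard IPython hook for completing keys inside `obj["<TAB>`. A minimal self-contained sketch of the protocol:

```python
class Bag:
    """Toy mapping that advertises its keys to IPython."""

    def __init__(self, **items):
        self._items = items

    def __getitem__(self, key):
        return self._items[key]

    def _ipython_key_completions_(self):
        # Names IPython suggests when completing b["<TAB>
        return list(self._items)

b = Bag(hist=1, tree=2)  # in IPython, b["<TAB> now suggests "hist" and "tree"
```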
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py
new file mode 100644
index 0000000000000..7c9c4c69b9260
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tefficiency.py
@@ -0,0 +1,26 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+
+
+def _SetDirectory_SetOwnership(self, dir):
+ self._Original_SetDirectory(dir)
+ if dir:
+ # If we are actually registering with a directory, give ownership to C++
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization("TEfficiency")
+def pythonize_tefficiency(klass):
+
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py
new file mode 100644
index 0000000000000..06257a49605d4
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tentrylist.py
@@ -0,0 +1,21 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership
+
+
+@pythonization("TEntryList")
+def pythonize_tentrylist(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
+
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py
new file mode 100644
index 0000000000000..0175c74fcebb3
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_teventlist.py
@@ -0,0 +1,21 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership
+
+
+@pythonization("TEventList")
+def pythonize_teventlist(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
+
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py
index 9aa9fc4883aae..5994b6b20ae00 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf1.py
@@ -98,9 +98,23 @@ def _TF1_EvalPar(self, vars, params):
ROOT.Internal.EvalParMultiDim(self, out, x, x_size, nrows, params)
return numpy.frombuffer(out, dtype=numpy.float64, count=nrows)
+
+def _TF1_Constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
@pythonization('TF1')
def pythonize_tf1(klass):
# Pythonizations for TH1::EvalPar
klass._EvalPar = klass.EvalPar
klass.EvalPar = _TF1_EvalPar
+
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TF1_Constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py
new file mode 100644
index 0000000000000..0ce9220100c00
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf2.py
@@ -0,0 +1,25 @@
+# Author: Vincenzo Eduardo Padulano CERN 11/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+from . import pythonization
+
+def _TF2_constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization("TF2")
+def pythonize_tf2(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TF2_constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py
new file mode 100644
index 0000000000000..11f5e8db240f6
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tf3.py
@@ -0,0 +1,25 @@
+# Author: Vincenzo Eduardo Padulano CERN 11/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+from . import pythonization
+
+def _TF3_constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization("TF3")
+def pythonize_tf3(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TF3_constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py
new file mode 100644
index 0000000000000..bfed1cb6829e6
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tfilemerger.py
@@ -0,0 +1,31 @@
+# Author: Giacomo Parolini CERN 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+
+def _TFileMergerExit(obj, exc_type, exc_val, exc_tb):
+ """
+ Close the merger's output file.
+ Signature and return value are imposed by Python, see
+ https://docs.python.org/3/library/stdtypes.html#typecontextmanager
+ """
+ obj.CloseOutputFile()
+ return False
+
+
+@pythonization('TFileMerger')
+def pythonize_tfile_merger(klass):
+ """
+ TFileMerger works as a context manager.
+ """
+ # Pythonization for __enter__ and __exit__ methods
+ # These make TFileMerger usable in a `with` statement as a context manager
+ klass.__enter__ = lambda merger: merger
+ klass.__exit__ = _TFileMergerExit
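
A hedged usage sketch of the new context manager (file names hypothetical); since `__exit__` returns False, exceptions raised inside the block still propagate:

```python
import ROOT

merger = ROOT.TFileMerger()
with merger:                      # __enter__ returns the merger itself
    merger.OutputFile("merged.root")
    merger.AddFile("part1.root")
    merger.AddFile("part2.root")
    merger.Merge()
# on exit, CloseOutputFile() has been called, so merged.root is closed
```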
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py
new file mode 100644
index 0000000000000..2bc29dc872979
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tformula.py
@@ -0,0 +1,28 @@
+# Author: Vincenzo Eduardo Padulano CERN 11/2024
+# Author: Jonas Rembser CERN 11/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+from . import pythonization
+
+
+def _TFormula_Constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization('TFormula')
+def pythonize_tformula(klass):
+
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TFormula_Constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py
new file mode 100644
index 0000000000000..571f9a3225d55
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tgraph2d.py
@@ -0,0 +1,21 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+from ROOT._pythonization._memory_utils import _constructor_releasing_ownership, _SetDirectory_SetOwnership
+
+
+@pythonization("TGraph2D")
+def pythonize_tgraph2d(klass):
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
+
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py
index e8158168bde96..5f40906e53ddc 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th1.py
@@ -9,7 +9,7 @@
################################################################################
from . import pythonization
-
+from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership, inject_clone_releasing_ownership, _SetDirectory_SetOwnership
# Multiplication by constant
@@ -23,6 +23,22 @@ def _imul(self, c):
return self
+# The constructors need to be pythonized for each derived class separately:
+_th1_derived_classes_to_pythonize = [
+ "TH1C",
+ "TH1S",
+ "TH1I",
+ "TH1L",
+ "TH1F",
+ "TH1D",
+ "TH1K",
+ "TProfile",
+]
+
+for klass in _th1_derived_classes_to_pythonize:
+ pythonization(klass)(inject_constructor_releasing_ownership)
+
+
@pythonization('TH1')
def pythonize_th1(klass):
# Parameters:
@@ -30,3 +46,8 @@ def pythonize_th1(klass):
# Support hist *= scalar
klass.__imul__ = _imul
+
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
+
+ inject_clone_releasing_ownership(klass)
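
The loop over `_th1_derived_classes_to_pythonize` works because `pythonization` is a decorator factory: calling it and applying the result by hand is the same as using the `@` syntax. A small sketch, assuming ROOT's public `pythonization` entry point and a hypothetical hook:

```python
from ROOT import pythonization

def add_nbins_repr(klass):
    # Hypothetical hook: give histograms a friendlier repr
    klass.__repr__ = lambda self: f"<{klass.__name__}: {self.GetNbinsX()} bins>"

for name in ["TH1D", "TH1F"]:
    pythonization(name)(add_nbins_repr)  # equivalent to @pythonization(name)
```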
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py
new file mode 100644
index 0000000000000..0e030374eef7a
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th2.py
@@ -0,0 +1,31 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership
+
+
+# The constructors need to be pythonized for each derived class separately:
+_th2_derived_classes_to_pythonize = [
+ "TH2C",
+ "TH2S",
+ "TH2I",
+ "TH2L",
+ "TH2F",
+ "TH2D",
+ # "TH2Poly", # Derives from TH2 but does not automatically register
+ # "TH2PolyBin", Does not derive from TH2
+ "TProfile2D",
+ # "TProfile2PolyBin", Derives from TH2PolyBin which does not derive from TH2
+ "TProfile2Poly",
+]
+
+for klass in _th2_derived_classes_to_pythonize:
+ pythonization(klass)(inject_constructor_releasing_ownership)
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py
new file mode 100644
index 0000000000000..9776d0489c761
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_th3.py
@@ -0,0 +1,28 @@
+# Author: Vincenzo Eduardo Padulano 12/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+from ROOT._pythonization._memory_utils import inject_constructor_releasing_ownership
+
+
+# The constructors need to be pythonized for each derived class separately:
+_th3_derived_classes_to_pythonize = [
+ # "TGLTH3Composition", Derives from TH3 but does not automatically register
+ "TH3C",
+ "TH3S",
+ "TH3I",
+ "TH3L",
+ "TH3F",
+ "TH3D",
+ "TProfile3D",
+]
+
+for klass in _th3_derived_classes_to_pythonize:
+ pythonization(klass)(inject_constructor_releasing_ownership)
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py
index 72c210663d9cf..7e75e92cef032 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/__init__.py
@@ -16,8 +16,6 @@
from .. import pythonization
-from libROOTPythonizations import gROOT
-
from ._factory import Factory
from ._dataloader import DataLoader
from ._crossvalidation import CrossValidation
@@ -45,7 +43,7 @@ def inject_rbatchgenerator(ns):
from ._gnn import RModel_GNN, RModel_GraphIndependent
-hasRDF = "dataframe" in gROOT.GetConfigFeatures()
+hasRDF = "dataframe" in cppyy.gbl.ROOT.GetROOT().GetConfigFeatures()
if hasRDF:
from ._rtensor import get_array_interface, add_array_interface_property, RTensorGetitem, pythonize_rtensor, _AsRTensor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py
index 008be762aeec8..6cf046e90a97a 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tmva/_batchgenerator.py
@@ -1,3 +1,16 @@
+# Author: Dante Niewenhuis, VU Amsterdam 07/2023
+# Author: Kristupas Pranckietis, Vilnius University 05/2024
+# Author: Nopphakorn Subsa-Ard, King Mongkut's University of Technology Thonburi (KMUTT) (TH) 08/2024
+# Author: Vincenzo Eduardo Padulano, CERN 10/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
from __future__ import annotations
from typing import Any, Callable, Tuple, TYPE_CHECKING
@@ -12,8 +25,7 @@
class BaseGenerator:
def get_template(
self,
- tree_name: str,
- file_name: str,
+ x_rdf: RNode,
columns: list[str] = list(),
max_vec_sizes: dict[str, int] = dict(),
) -> Tuple[str, list[int]]:
@@ -22,8 +34,7 @@ def get_template(
RDataFrame and columns.
Args:
- file_name (str): name of the root file.
- tree_name (str): name of the tree in the root file.
+ rdataframe (RNode): RDataFrame or RNode object.
columns (list[str]): Columns that should be loaded.
Defaults to loading all columns
in the given RDataFrame
@@ -33,65 +44,23 @@ def get_template(
template (str): Template for the RBatchGenerator
"""
- # from cppyy.gbl.ROOT import RDataFrame
- from ROOT import RDataFrame
-
- x_rdf = RDataFrame(tree_name, file_name)
-
if not columns:
columns = x_rdf.GetColumnNames()
- template_dict = {
- "Bool_t": "bool&",
- "Double_t": "double&",
- "Double32_t": "double&",
- "Float_t": "float&",
- "Float16_t": "float&",
- "Int_t": "int&",
- "UInt_t": "unsigned int&",
- "Long_t": "long&",
- "ULong_t": "unsigned long&",
- "Long64_t": "long long&",
- "ULong64_t": "unsigned long long&",
- "Short_t": "short&",
- "UShort_t": "unsigned short&",
-
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec",
- "ROOT::VecOps::RVec": "ROOT::RVec"
- }
-
template_string = ""
self.given_columns = []
self.all_columns = []
- # Get the types of the different columns
max_vec_sizes_list = []
for name in columns:
name_str = str(name)
self.given_columns.append(name_str)
- column_type = template_dict[str(x_rdf.GetColumnType(name_str))]
- template_string += column_type + ","
-
- if column_type in [
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec",
- "ROOT::RVec"
- ]:
+ column_type = x_rdf.GetColumnType(name_str)
+ template_string = f"{template_string}{column_type},"
+
+ if "RVec" in column_type:
# Add column for each element if column is a vector
if name_str in max_vec_sizes:
max_vec_sizes_list.append(max_vec_sizes[name_str])
@@ -111,41 +80,37 @@ def get_template(
def __init__(
self,
- tree_name: str,
- file_name: str,
+ rdataframe: RNode,
batch_size: int,
chunk_size: int,
columns: list[str] = list(),
- filters: list[str] = list(),
max_vec_sizes: dict[str, int] = dict(),
vec_padding: int = 0,
- target: str = "",
+ target: str | list[str] = list(),
weights: str = "",
validation_split: float = 0.0,
max_chunks: int = 0,
shuffle: bool = True,
+ drop_remainder: bool = True,
):
"""Wrapper around the Cpp RBatchGenerator
- Args:
- tree_name (str): Name of the tree in the ROOT file
- file_name (str): Path to the ROOT file
+ Args:
+ rdataframe (RNode): RDataFrame or RNode object.
batch_size (int): Size of the returned chunks.
chunk_size (int):
- The size of the chunks loaded from the ROOT file. Higher chunk
- size results in better randomization, but higher memory usage.
+ The size of the chunks loaded from the ROOT file. Higher chunk size
+ results in better randomization, but also higher memory usage.
columns (list[str], optional):
Columns to be returned. If not given, all columns are used.
- filters (list[str], optional):
- Filters to apply during loading. If not given, no filters
- are applied.
max_vec_sizes (dict[std, int], optional):
Size of each column that consists of vectors.
Required when using vector based columns.
vec_padding (int):
Value to pad vectors with if the vector is smaller
than the given max vector length. Defaults is 0
- target (str, optional): Column that is used as target.
+ target (str|list[str], optional):
+ Column(s) used as target.
weights (str, optional):
Column used to weight events.
Can only be used when a target is given.
@@ -158,10 +123,17 @@ def __init__(
shuffle (bool):
Batches consist of random events and are shuffled every epoch.
Defaults to True.
+ drop_remainder (bool):
+ Drop the remainder of the data if it is too small to compose a full batch.
+ Defaults to True.
"""
+ import ROOT
+ from ROOT import RDF
+
try:
import numpy as np
+
except ImportError:
raise ImportError(
"Failed to import NumPy during init. NumPy is required when \
@@ -180,45 +152,63 @@ def __init__(
given value is {validation_split}"
)
- # TODO: better linking when importing into ROOT
- # ROOT.gInterpreter.ProcessLine(
- # f'#include "{main_folder}Cpp_files/RBatchGenerator.cpp"')
+ self.noded_rdf = RDF.AsRNode(rdataframe)
+
+ if ROOT.Internal.RDF.GetDataSourceLabel(self.noded_rdf) != "TTreeDS":
+ raise ValueError(
+ "RNode object must be created out of TTrees or files of TTree"
+ )
+
+ if isinstance(target, str):
+ target = [target]
- self.target_column = target
+ self.target_columns = target
self.weights_column = weights
template, max_vec_sizes_list = self.get_template(
- tree_name, file_name, columns, max_vec_sizes
+ rdataframe, columns, max_vec_sizes
)
self.num_columns = len(self.all_columns)
self.batch_size = batch_size
# Handle target
- self.target_given = len(self.target_column) > 0
+ self.target_given = len(self.target_columns) > 0
+ self.weights_given = len(self.weights_column) > 0
if self.target_given:
- if target in self.all_columns:
- self.target_index = self.all_columns.index(self.target_column)
- else:
- raise ValueError(
- f"Provided target not in given columns: \ntarget => \
- {target}\ncolumns => {self.all_columns}"
- )
+ for target in self.target_columns:
+ if target not in self.all_columns:
+ raise ValueError(
+ f"Provided target not in given columns: \ntarget => \
+ {target}\ncolumns => {self.all_columns}")
- # Handle weights
- self.weights_given = len(self.weights_column) > 0
- if self.weights_given and not self.target_given:
- raise ValueError("Weights can only be used when a target is provided")
- if self.weights_given:
- if weights in self.all_columns:
- self.weights_index = self.all_columns.index(self.weights_column)
+ self.target_indices = [self.all_columns.index(
+ target) for target in self.target_columns]
+
+ # Handle weights
+ if self.weights_given:
+ if weights in self.all_columns:
+ self.weights_index = self.all_columns.index(
+ self.weights_column)
+ self.train_indices = [c for c in range(
+ len(self.all_columns)) if c not in self.target_indices+[self.weights_index]]
+ else:
+ raise ValueError(
+ f"Provided weights not in given columns: \nweights => \
+ {weights}\ncolumns => {self.all_columns}"
+ )
else:
- raise ValueError(
- f"Provided weights not in given columns: \nweights => \
- {weights}\ncolumns => {self.all_columns}"
- )
+ self.train_indices = [c for c in range(
+ len(self.all_columns)) if c not in self.target_indices]
- self.train_columns = [c for c in self.all_columns if c not in [target, weights]]
+ elif self.weights_given:
+ raise ValueError(
+ "Weights can only be used when a target is provided")
+ else:
+ self.train_indices = [c for c in range(len(self.all_columns))]
+
+ self.train_columns = [
+ c for c in self.all_columns if c not in self.target_columns+[self.weights_column]]
from ROOT import TMVA, EnableThreadSafety
@@ -228,28 +218,22 @@ def __init__(
# cling via cppyy) and the I/O thread.
EnableThreadSafety()
- expanded_filter = " && ".join(["(" + fltr + ")" for fltr in filters])
-
self.generator = TMVA.Experimental.Internal.RBatchGenerator(template)(
- tree_name,
- file_name,
+ self.noded_rdf,
chunk_size,
batch_size,
self.given_columns,
- expanded_filter,
+ self.num_columns,
max_vec_sizes_list,
vec_padding,
validation_split,
max_chunks,
- self.num_columns,
shuffle,
+ drop_remainder,
)
atexit.register(self.DeActivate)
- def StartValidation(self):
- self.generator.StartValidation()
-
@property
def is_active(self):
return self.generator.IsActive()
@@ -259,7 +243,7 @@ def Activate(self):
self.generator.Activate()
def DeActivate(self):
- """Initialize the generator to be used for a loop"""
+ """Deactivate the generator"""
self.generator.DeActivate()
def GetSample(self):
@@ -281,14 +265,25 @@ def GetSample(self):
return np.zeros((self.batch_size, self.num_columns))
if not self.weights_given:
+ if len(self.target_indices) == 1:
+ return np.zeros((self.batch_size, self.num_columns - 1)), np.zeros(
+ (self.batch_size)).reshape(-1, 1)
+
return np.zeros((self.batch_size, self.num_columns - 1)), np.zeros(
- (self.batch_size)
+ (self.batch_size, len(self.target_indices))
+ )
+
+ if len(self.target_indices) == 1:
+ return (
+ np.zeros((self.batch_size, self.num_columns - 2)),
+ np.zeros((self.batch_size)).reshape(-1, 1),
+ np.zeros((self.batch_size)).reshape(-1, 1),
)
return (
np.zeros((self.batch_size, self.num_columns - 2)),
- np.zeros((self.batch_size)),
- np.zeros((self.batch_size)),
+ np.zeros((self.batch_size, len(self.target_indices))),
+ np.zeros((self.batch_size)).reshape(-1, 1),
)
def ConvertBatchToNumpy(self, batch: "RTensor") -> np.ndarray:
@@ -306,35 +301,30 @@ def ConvertBatchToNumpy(self, batch: "RTensor") -> np.ndarray:
raise ImportError("Failed to import numpy in batchgenerator init")
data = batch.GetData()
- data.reshape((self.batch_size * self.num_columns,))
+ batch_size, num_columns = tuple(batch.GetShape())
- return_data = np.array(data).reshape(self.batch_size, self.num_columns)
+ data.reshape((batch_size * num_columns,))
- # Splice target column from the data if weight is given
+ return_data = np.asarray(data).reshape(batch_size, num_columns)
+
+ # Splice target column from the data if target is given
if self.target_given:
- target_data = return_data[:, self.target_index]
- return_data = np.column_stack(
- (
- return_data[:, : self.target_index],
- return_data[:, self.target_index + 1 :],
- )
- )
+ train_data = return_data[:, self.train_indices]
+ target_data = return_data[:, self.target_indices]
- # Splice weights column from the data if weight is given
+ # Splice weight column from the data if weight is given
if self.weights_given:
- if self.target_index < self.weights_index:
- self.weights_index -= 1
-
weights_data = return_data[:, self.weights_index]
- return_data = np.column_stack(
- (
- return_data[:, : self.weights_index],
- return_data[:, self.weights_index + 1 :],
- )
- )
- return return_data, target_data, weights_data
- return return_data, target_data
+ if len(self.target_indices) == 1:
+ return train_data, target_data.reshape(-1, 1), weights_data.reshape(-1, 1)
+
+ return train_data, target_data, weights_data.reshape(-1, 1)
+
+ if len(self.target_indices) == 1:
+ return train_data, target_data.reshape(-1, 1)
+
+ return train_data, target_data
return return_data
@@ -348,58 +338,77 @@ def ConvertBatchToPyTorch(self, batch: Any) -> torch.Tensor:
torch.Tensor: converted batch
"""
import torch
+ import numpy as np
data = batch.GetData()
- data.reshape((self.batch_size * self.num_columns,))
+ batch_size, num_columns = tuple(batch.GetShape())
- return_data = torch.Tensor(data).reshape(self.batch_size, self.num_columns)
+ data.reshape((batch_size * num_columns,))
- # Splice target column from the data if weight is given
+ return_data = torch.as_tensor(np.asarray(data)).reshape(
+ batch_size, num_columns)
+
+ # Splice target column from the data if target is given
if self.target_given:
- target_data = return_data[:, self.target_index]
- return_data = torch.column_stack(
- (
- return_data[:, : self.target_index],
- return_data[:, self.target_index + 1 :],
- )
- )
+ train_data = return_data[:, self.train_indices]
+ target_data = return_data[:, self.target_indices]
- # Splice weights column from the data if weight is given
+ # Splice weight column from the data if weight is given
if self.weights_given:
- if self.target_index < self.weights_index:
- self.weights_index -= 1
-
weights_data = return_data[:, self.weights_index]
- return_data = torch.column_stack(
- (
- return_data[:, : self.weights_index],
- return_data[:, self.weights_index + 1 :],
- )
- )
- return return_data, target_data, weights_data
- return return_data, target_data
+ if len(self.target_indices) == 1:
+ return train_data, target_data.reshape(-1, 1), weights_data.reshape(-1, 1)
+
+ return train_data, target_data, weights_data.reshape(-1, 1)
+
+ if len(self.target_indices) == 1:
+ return train_data, target_data.reshape(-1, 1)
+
+ return train_data, target_data
return return_data
- def ConvertBatchToTF(self, batch: Any) -> np.ndarray:
+ def ConvertBatchToTF(self, batch: Any) -> Any:
"""
- PLACEHOLDER: at this moment this function only calls the
- ConvertBatchToNumpy function. In the Future this function can be
- used to convert to TF tensors directly
+ Convert a RTensor into a TensorFlow tensor
Args:
batch (RTensor): Batch returned from the RBatchGenerator
Returns:
- np.ndarray: converted batch
+ tensorflow.Tensor: converted batch
"""
- # import tensorflow as tf
+ import tensorflow as tf
+
+ data = batch.GetData()
+ batch_size, num_columns = tuple(batch.GetShape())
- batch = self.ConvertBatchToNumpy(batch)
+ data.reshape((batch_size * num_columns,))
- # TODO: improve this by returning tensorflow tensors
- return batch
+ return_data = tf.constant(data, shape=(batch_size, num_columns))
+
+ if batch_size != self.batch_size:
+ return_data = tf.pad(return_data, tf.constant(
+ [[0, self.batch_size - batch_size], [0, 0]]))
+
+ # Splice target column from the data if weight is given
+ if self.target_given:
+ train_data = tf.gather(
+ return_data, indices=self.train_indices, axis=1)
+ target_data = tf.gather(
+ return_data, indices=self.target_indices, axis=1)
+
+ # Splice weight column from the data if weight is given
+ if self.weights_given:
+ weights_data = tf.gather(return_data, indices=[
+ self.weights_index], axis=1)
+
+ return train_data, target_data, weights_data
+
+ return train_data, target_data
+
+ return return_data
# Return a batch when available
def GetTrainBatch(self) -> Any:
@@ -478,13 +487,21 @@ def train_columns(self) -> list[str]:
return self.base_generator.train_columns
@property
- def target_column(self) -> str:
- return self.base_generator.target_column
+ def target_columns(self) -> str:
+ return self.base_generator.target_columns
@property
def weights_column(self) -> str:
return self.base_generator.weights_column
+ @property
+ def number_of_batches(self) -> int:
+ return self.base_generator.generator.NumberOfTrainingBatches()
+
+ @property
+ def last_batch_no_of_rows(self) -> int:
+ return self.base_generator.generator.TrainRemainderRows()
+
def __iter__(self):
self._callable = self.__call__()
@@ -509,7 +526,7 @@ def __call__(self) -> Any:
while True:
batch = self.base_generator.GetTrainBatch()
- if not batch:
+ if batch is None:
break
yield self.conversion_function(batch)
@@ -542,13 +559,21 @@ def train_columns(self) -> list[str]:
return self.base_generator.train_columns
@property
- def target_column(self) -> str:
- return self.base_generator.target_column
+ def target_columns(self) -> str:
+ return self.base_generator.target_columns
@property
def weights_column(self) -> str:
return self.base_generator.weights_column
+ @property
+ def number_of_batches(self) -> int:
+ return self.base_generator.generator.NumberOfValidationBatches()
+
+ @property
+ def last_batch_no_of_rows(self) -> int:
+ return self.base_generator.generator.ValidationRemainderRows()
+
def __iter__(self):
self._callable = self.__call__()
@@ -571,8 +596,6 @@ def __call__(self) -> Any:
if self.base_generator.is_active:
self.base_generator.DeActivate()
- self.base_generator.StartValidation()
-
while True:
batch = self.base_generator.GetValidationBatch()
@@ -583,41 +606,37 @@ def __call__(self) -> Any:
def CreateNumPyGenerators(
- tree_name: str,
- file_name: str,
+ rdataframe: RNode,
batch_size: int,
chunk_size: int,
columns: list[str] = list(),
- filters: list[str] = list(),
max_vec_sizes: dict[str, int] = dict(),
vec_padding: int = 0,
- target: str = "",
+ target: str | list[str] = list(),
weights: str = "",
validation_split: float = 0.0,
max_chunks: int = 0,
shuffle: bool = True,
+ drop_remainder=True,
) -> Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
"""
- Return two batch generators based on the given ROOT file and tree.
+ Return two batch generators based on the given RDataFrame or RNode
The first generator returns training batches, while the second generator
returns validation batches
Args:
- tree_name (str): Name of the tree in the ROOT file
- file_name (str): Path to the ROOT file
+ rdataframe (RNode): RDataFrame or RNode object.
batch_size (int): Size of the returned chunks.
chunk_size (int):
The size of the chunks loaded from the ROOT file. Higher chunk size
results in better randomization, but also higher memory usage.
columns (list[str], optional):
Columns to be returned. If not given, all columns are used.
- filters (list[str], optional):
- Filters to apply. If not given, no filters are applied.
max_vec_sizes (list[int], optional):
Size of each column that consists of vectors.
Required when using vector based columns
- target (str, optional):
- Column that is used as target.
+ target (str|list[str], optional):
+ Column(s) used as target.
weights (str, optional):
Column used to weight events.
Can only be used when a target is given
@@ -628,22 +647,36 @@ def CreateNumPyGenerators(
The number of chunks that should be loaded for an epoch.
If not given, the whole file is used
shuffle (bool):
- randomize the training batches every epoch. Defaults to True
+ randomize the training batches every epoch.
+ Defaults to True
+ drop_remainder (bool):
+ Drop the remainder of the data if it is too small to compose a full batch.
+ Defaults to True.
+ Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be
+ given.
+ If drop_remainder = True, then two batches [0, 1, 2, 3] and
+ [4, 5, 6, 7] will be returned.
+ If drop_remainder = False, then three batches [0, 1, 2, 3],
+ [4, 5, 6, 7] and [8, 9] will be returned.
Returns:
- Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
- Two generators are returned. One used to load training batches,
- and one to load validation batches. NOTE: the validation batches
- are loaded during the training. Before training, the validation
- generator will return no batches.
+ TrainRBatchGenerator or
+ Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
+ If validation split is 0, return the TrainRBatchGenerator and None.
+
+ Otherwise two generators are returned. One used to load training
+ batches, and one to load validation batches. NOTE: the validation
+ batches are loaded during the training. Before training, the
+ validation generator will return no batches.
"""
+
+ import numpy as np
+
base_generator = BaseGenerator(
- tree_name,
- file_name,
+ rdataframe,
batch_size,
chunk_size,
columns,
- filters,
max_vec_sizes,
vec_padding,
target,
@@ -651,11 +684,16 @@ def CreateNumPyGenerators(
validation_split,
max_chunks,
shuffle,
+ drop_remainder,
)
train_generator = TrainRBatchGenerator(
base_generator, base_generator.ConvertBatchToNumpy
)
+
+ if validation_split == 0.0:
+ return train_generator, None
+
validation_generator = ValidationRBatchGenerator(
base_generator, base_generator.ConvertBatchToNumpy
)
@@ -664,41 +702,37 @@ def CreateNumPyGenerators(
def CreateTFDatasets(
- tree_name: str,
- file_name: str,
+ rdataframe: RNode,
batch_size: int,
chunk_size: int,
columns: list[str] = list(),
- filters: list[str] = list(),
max_vec_sizes: dict[str, int] = dict(),
vec_padding: int = 0,
- target: str = "",
+ target: str | list[str] = list(),
weights: str = "",
validation_split: float = 0.0,
max_chunks: int = 0,
shuffle: bool = True,
+ drop_remainder=True,
) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""
- Return two Tensorflow Datasets based on the given ROOT file and tree
+ Return two TensorFlow Datasets based on the given RDataFrame or RNode
The first generator returns training batches, while the second generator
returns validation batches
Args:
- tree_name (str): Name of the tree in the ROOT file
- file_name (str): Path to the ROOT file
+ rdataframe (RNode): RDataFrame or RNode object.
batch_size (int): Size of the returned chunks.
chunk_size (int):
The size of the chunks loaded from the ROOT file. Higher chunk size
results in better randomization, but also higher memory usage.
columns (list[str], optional):
Columns to be returned. If not given, all columns are used.
- filters (list[str], optional):
- Filters to apply. If not given, no filters are applied.
max_vec_sizes (list[int], optional):
Size of each column that consists of vectors.
Required when using vector based columns
- target (str, optional):
- Column that is used as target.
+ target (str|list[str], optional):
+ Column(s) used as target.
weights (str, optional):
Column used to weight events.
Can only be used when a target is given
@@ -709,24 +743,35 @@ def CreateTFDatasets(
The number of chunks that should be loaded for an epoch.
If not given, the whole file is used
shuffle (bool):
- randomize the training batches every epoch. Defaults to True
+ randomize the training batches every epoch.
+ Defaults to True
+ drop_remainder (bool):
+ Drop the remainder of the data if it is too small to compose a full batch.
+ Defaults to True.
+ Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be
+ given.
+ If drop_remainder = True, then two batches [0, 1, 2, 3] and
+ [4, 5, 6, 7] will be returned.
+ If drop_remainder = False, then three batches [0, 1, 2, 3],
+ [4, 5, 6, 7] and [8, 9] will be returned.
Returns:
- Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
- Two generators are returned. One used to load training batches,
- and one to load validation batches. NOTE: the validation batches
- are loaded during the training. Before training, the validation
- generator will return no batches.
+ TrainRBatchGenerator or
+ Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
+ If validation split is 0, return only the training dataset.
+
+ Otherwise two generators are returned. One used to load training
+ batches, and one to load validation batches. NOTE: the validation
+ batches are loaded during the training. Before training, the
+ validation generator will return no batches.
"""
import tensorflow as tf
base_generator = BaseGenerator(
- tree_name,
- file_name,
+ rdataframe,
batch_size,
chunk_size,
columns,
- filters,
max_vec_sizes,
vec_padding,
target,
@@ -734,6 +779,7 @@ def CreateTFDatasets(
validation_split,
max_chunks,
shuffle,
+ drop_remainder,
)
train_generator = TrainRBatchGenerator(
@@ -743,27 +789,32 @@ def CreateTFDatasets(
base_generator, base_generator.ConvertBatchToTF
)
- num_columns = len(train_generator.train_columns)
+ num_train_columns = len(train_generator.train_columns)
+ num_target_columns = len(train_generator.target_columns)
# No target and weights given
if target == "":
batch_signature = tf.TensorSpec(
- shape=(batch_size, num_columns), dtype=tf.float32
+ shape=(batch_size, num_train_columns), dtype=tf.float32
)
# Target given, no weights given
elif weights == "":
batch_signature = (
- tf.TensorSpec(shape=(batch_size, num_columns), dtype=tf.float32),
- tf.TensorSpec(shape=(batch_size,), dtype=tf.float32),
+ tf.TensorSpec(shape=(batch_size, num_train_columns),
+ dtype=tf.float32),
+ tf.TensorSpec(shape=(batch_size, num_target_columns),
+ dtype=tf.float32),
)
# Target and weights given
else:
batch_signature = (
- tf.TensorSpec(shape=(batch_size, num_columns), dtype=tf.float32),
- tf.TensorSpec(shape=(batch_size,), dtype=tf.float32),
- tf.TensorSpec(shape=(batch_size,), dtype=tf.float32),
+ tf.TensorSpec(shape=(batch_size, num_train_columns),
+ dtype=tf.float32),
+ tf.TensorSpec(shape=(batch_size, num_target_columns),
+ dtype=tf.float32),
+ tf.TensorSpec(shape=(batch_size, 1), dtype=tf.float32),
)
ds_train = tf.data.Dataset.from_generator(
@@ -773,8 +824,12 @@ def CreateTFDatasets(
# Give access to the columns function of the training set
setattr(ds_train, "columns", train_generator.columns)
setattr(ds_train, "train_columns", train_generator.train_columns)
- setattr(ds_train, "target_column", train_generator.target_column)
+ setattr(ds_train, "target_column", train_generator.target_columns)
setattr(ds_train, "weights_column", train_generator.weights_column)
+ setattr(ds_train, "number_of_batches", train_generator.number_of_batches)
+
+ if validation_split == 0.0:
+ return ds_train
ds_validation = tf.data.Dataset.from_generator(
validation_generator, output_signature=batch_signature
@@ -783,48 +838,46 @@ def CreateTFDatasets(
# Give access to the columns function of the validation set
setattr(ds_validation, "columns", train_generator.columns)
setattr(ds_validation, "train_columns", train_generator.train_columns)
- setattr(ds_validation, "target_column", train_generator.target_column)
+ setattr(ds_validation, "target_column", train_generator.target_columns)
setattr(ds_validation, "weights_column", train_generator.weights_column)
+ setattr(ds_validation, "number_of_batches",
+ validation_generator.number_of_batches)
return ds_train, ds_validation
def CreatePyTorchGenerators(
- tree_name: str,
- file_name: str,
+ rdataframe: RNode,
batch_size: int,
chunk_size: int,
columns: list[str] = list(),
- filters: list[str] = list(),
max_vec_sizes: dict[str, int] = dict(),
vec_padding: int = 0,
- target: str = "",
+ target: str | list[str] = list(),
weights: str = "",
validation_split: float = 0.0,
max_chunks: int = 0,
shuffle: bool = True,
+ drop_remainder=True,
) -> Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
"""
- Return two Tensorflow Datasets based on the given ROOT file and tree
+ Return two PyTorch batch generators based on the given RDataFrame or RNode
The first generator returns training batches, while the second generator
returns validation batches
Args:
- tree_name (str): Name of the tree in the ROOT file
- file_name (str): Path to the ROOT file
+ rdataframe (RNode): RDataFrame or RNode object.
batch_size (int): Size of the returned chunks.
chunk_size (int):
The size of the chunks loaded from the ROOT file. Higher chunk size
results in better randomization, but also higher memory usage.
columns (list[str], optional):
Columns to be returned. If not given, all columns are used.
- filters (list[str], optional):
- Filters to apply. If not given, no filters are applied.
max_vec_sizes (list[int], optional):
Size of each column that consists of vectors.
Required when using vector based columns
- target (str, optional):
- Column that is used as target.
+ target (str|list[str], optional):
+ Column(s) used as target.
weights (str, optional):
Column used to weight events.
Can only be used when a target is given
@@ -835,22 +888,33 @@ def CreatePyTorchGenerators(
The number of chunks that should be loaded for an epoch.
If not given, the whole file is used
shuffle (bool):
- randomize the training batches every epoch. Defaults to True
+ randomize the training batches every epoch.
+ Defaults to True
+ drop_remainder (bool):
+ Drop the remainder of the data if it is too small to compose a full batch.
+ Defaults to True.
+ Let a data list [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] with batch_size=4 be
+ given.
+ If drop_remainder = True, then two batches [0, 1, 2, 3] and
+ [4, 5, 6, 7] will be returned.
+ If drop_remainder = False, then three batches [0, 1, 2, 3],
+ [4, 5, 6, 7] and [8, 9] will be returned.
Returns:
- Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
- Two generators are returned. One used to load training batches,
- and one to load validation batches. NOTE: the validation batches
- are loaded during the training. Before training, the validation
- generator will return no batches.
+ TrainRBatchGenerator or
+ Tuple[TrainRBatchGenerator, ValidationRBatchGenerator]:
+ If validation split is 0, return the TrainRBatchGenerator.
+
+ Otherwise two generators are returned. One used to load training
+ batches, and one to load validation batches. NOTE: the validation
+ batches are loaded during the training. Before training, the
+ validation generator will return no batches.
"""
base_generator = BaseGenerator(
- tree_name,
- file_name,
+ rdataframe,
batch_size,
chunk_size,
columns,
- filters,
max_vec_sizes,
vec_padding,
target,
@@ -858,11 +922,16 @@ def CreatePyTorchGenerators(
validation_split,
max_chunks,
shuffle,
+ drop_remainder,
)
train_generator = TrainRBatchGenerator(
base_generator, base_generator.ConvertBatchToPyTorch
)
+
+ if validation_split == 0.0:
+ return train_generator
+
validation_generator = ValidationRBatchGenerator(
base_generator, base_generator.ConvertBatchToPyTorch
)
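
Putting the new signature together, a hedged usage sketch (tree and file names hypothetical; the RDataFrame must be backed by a TTree):

```python
import ROOT

df = ROOT.RDataFrame("events", "train.root")
gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
    df,
    batch_size=128,
    chunk_size=5000,
    target="label",
    validation_split=0.2,
)
for x, y in gen_train:
    # single target, so y is reshaped to a column vector
    print(x.shape, y.shape)  # (128, n_train_columns), (128, 1)
    break
```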
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py
new file mode 100644
index 0000000000000..aed3a901a5aa9
--- /dev/null
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_tstyle.py
@@ -0,0 +1,29 @@
+# Author: Jonas Rembser CERN 11/2024
+
+################################################################################
+# Copyright (C) 1995-2024, Rene Brun and Fons Rademakers. #
+# All rights reserved. #
+# #
+# For the licensing terms see $ROOTSYS/LICENSE. #
+# For the list of contributors see $ROOTSYS/README/CREDITS. #
+################################################################################
+
+from . import pythonization
+
+
+def _TStyle_Constructor(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ constructor and retain ownership. This
+ helps avoid double deletes due to ROOT's automatic memory management.
+ """
+ self._cpp_constructor(*args, **kwargs)
+ import ROOT
+
+ ROOT.SetOwnership(self, False)
+
+
+@pythonization("TStyle")
+def pythonize_tstyle(klass):
+
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _TStyle_Constructor
diff --git a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py
index 1957db444c843..36a4b53f97409 100644
--- a/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py
+++ b/bindings/pyroot/pythonizations/python/ROOT/_pythonization/_ttree.py
@@ -133,7 +133,7 @@
from libROOTPythonizations import GetBranchAttr, BranchPyz
from ._rvec import _array_interface_dtype_map, _get_cpp_type_from_numpy_type
from . import pythonization
-
+from ROOT._pythonization._memory_utils import _should_give_up_ownership, _constructor_releasing_ownership, _SetDirectory_SetOwnership
# TTree iterator
def _TTree__iter__(self):
@@ -279,6 +279,18 @@ def _TTree__getattr__(self, key):
out = cppyy.ll.cast[cast_type](out)
return out
+def _TTree_CloneTree(self, *args, **kwargs):
+ """
+ Forward the arguments to the C++ function and give up ownership if the
+ TTree is attached to a TFile, which is the owner in that case.
+ """
+ import ROOT
+
+ out_tree = self._CloneTree(*args, **kwargs)
+ if _should_give_up_ownership(out_tree):
+ ROOT.SetOwnership(out_tree, False)
+
+ return out_tree
@pythonization("TTree")
def pythonize_ttree(klass, name):
@@ -286,6 +298,14 @@ def pythonize_ttree(klass, name):
# klass: class to be pythonized
# name: string containing the name of the class
+ # Functions that need to drop the ownership if the current directory is a TFile
+
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
+
+ klass._CloneTree = klass.CloneTree
+ klass.CloneTree = _TTree_CloneTree
+
# Pythonizations that are common to TTree and its subclasses.
# To avoid duplicating the same logic in the pythonizors of
# the subclasses, inject the pythonizations for all the target
@@ -305,6 +325,9 @@ def pythonize_ttree(klass, name):
klass._OriginalBranch = klass.Branch
klass.Branch = _Branch
+ klass._Original_SetDirectory = klass.SetDirectory
+ klass.SetDirectory = _SetDirectory_SetOwnership
+
@pythonization("TChain")
def pythonize_tchain(klass):
@@ -321,3 +344,10 @@ def pythonize_tchain(klass):
# SetBranchAddress
klass._OriginalSetBranchAddress = klass.SetBranchAddress
klass.SetBranchAddress = _SetBranchAddress
+
+@pythonization("TNtuple")
+def pythonize_tntuple(klass):
+
+ # The constructor needs to be explicitly pythonized for derived classes.
+ klass._cpp_constructor = klass.__init__
+ klass.__init__ = _constructor_releasing_ownership
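A sketch of the ownership rule these pythonizations implement (the file name is illustrative):

    import ROOT

    with ROOT.TFile("owned.root", "RECREATE") as f:
        t = ROOT.TTree("t", "t")   # auto-registers with f, so the file owns it
        c = t.CloneTree()          # the clone is owned by the file as well
        f.Write()
    # Python no longer deletes t or c a second time when the proxies go away.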
diff --git a/bindings/pyroot/pythonizations/src/PyROOTModule.cxx b/bindings/pyroot/pythonizations/src/PyROOTModule.cxx
index 070176e18ffdb..efed441222db5 100644
--- a/bindings/pyroot/pythonizations/src/PyROOTModule.cxx
+++ b/bindings/pyroot/pythonizations/src/PyROOTModule.cxx
@@ -11,7 +11,6 @@
// Bindings
#include "PyROOTPythonize.h"
-#include "PyROOTWrapper.h"
#include "RPyROOTApplication.h"
// Cppyy
@@ -20,6 +19,7 @@
#include "../../cppyy/CPyCppyy/src/ProxyWrappers.h"
// ROOT
+#include "TInterpreter.h"
#include "TROOT.h"
#include "TSystem.h"
#include "RConfigure.h"
@@ -82,8 +82,6 @@ static PyMethodDef gPyROOTMethods[] = {
(char *)"Install an input hook to process GUI events"},
{(char *)"_CPPInstance__expand__", (PyCFunction)PyROOT::CPPInstanceExpand, METH_VARARGS,
(char *)"Deserialize a pickled object"},
- {(char *)"ClearProxiedObjects", (PyCFunction)PyROOT::ClearProxiedObjects, METH_NOARGS,
- (char *)"Clear proxied objects regulated by PyROOT"},
{(char *)"JupyROOTExecutor", (PyCFunction)JupyROOTExecutor, METH_VARARGS, (char *)"Create JupyROOTExecutor"},
{(char *)"JupyROOTDeclarer", (PyCFunction)JupyROOTDeclarer, METH_VARARGS, (char *)"Create JupyROOTDeclarer"},
{(char *)"JupyROOTExecutorHandler_Clear", (PyCFunction)JupyROOTExecutorHandler_Clear, METH_NOARGS,
@@ -146,8 +144,13 @@ extern "C" PyObject *PyInit_libROOTPythonizations()
// keep gRootModule, but do not increase its reference count even as it is borrowed,
// or a self-referencing cycle would be created
- // setup PyROOT
- PyROOT::Init();
+ // Initialize and acquire the GIL to allow for threading in ROOT
+#if PY_VERSION_HEX < 0x03090000
+ PyEval_InitThreads();
+#endif
+
+ // Make sure the interpreter is initialized once gROOT has been initialized
+ TInterpreter::Instance();
// signal policy: don't abort interpreter in interactive mode
CallContext::SetGlobalSignalPolicy(!gROOT->IsBatch());
diff --git a/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx b/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx
deleted file mode 100644
index 42c3988a30f56..0000000000000
--- a/bindings/pyroot/pythonizations/src/PyROOTWrapper.cxx
+++ /dev/null
@@ -1,74 +0,0 @@
-// Author: Enric Tejedor CERN 06/2018
-// Original PyROOT code by Wim Lavrijsen, LBL
-
-/*************************************************************************
- * Copyright (C) 1995-2018, Rene Brun and Fons Rademakers. *
- * All rights reserved. *
- * *
- * For the licensing terms see $ROOTSYS/LICENSE. *
- * For the list of contributors see $ROOTSYS/README/CREDITS. *
- *************************************************************************/
-
-// Bindings
-#include "PyROOTWrapper.h"
-#include "TMemoryRegulator.h"
-
-// Cppyy
-#include "CPyCppyy/API.h"
-
-// ROOT
-#include "TROOT.h"
-#include "TSystem.h"
-#include "TClass.h"
-#include "TInterpreter.h"
-#include "DllImport.h"
-
-namespace PyROOT {
-R__EXTERN PyObject *gRootModule;
-}
-
-using namespace PyROOT;
-
-namespace {
-
-static void AddToGlobalScope(const char *label, TObject *obj, const char *classname)
-{
- // Bind the given object with the given class in the global scope with the
- // given label for its reference.
- PyModule_AddObject(gRootModule, label, CPyCppyy::Instance_FromVoidPtr(obj, classname));
-}
-
-} // unnamed namespace
-
-PyROOT::RegulatorCleanup &GetRegulatorCleanup()
-{
- // The object is thread-local because it can happen that we call into
- // C++ code (from the PyROOT CPython extension, from CPyCppyy or from cling)
- // from different Python threads. A notable example is within a distributed
- // RDataFrame application running on Dask.
- thread_local PyROOT::RegulatorCleanup m;
- return m;
-}
-
-void PyROOT::Init()
-{
- // Initialize and acquire the GIL to allow for threading in ROOT
-#if PY_VERSION_HEX < 0x03090000
- PyEval_InitThreads();
-#endif
-
- // Memory management
- gROOT->GetListOfCleanups()->Add(&GetRegulatorCleanup());
-
- // Bind ROOT globals that will be needed in ROOT.py
- AddToGlobalScope("gROOT", gROOT, gROOT->IsA()->GetName());
- AddToGlobalScope("gSystem", gSystem, gSystem->IsA()->GetName());
- AddToGlobalScope("gInterpreter", gInterpreter, gInterpreter->IsA()->GetName());
-}
-
-PyObject *PyROOT::ClearProxiedObjects(PyObject * /* self */, PyObject * /* args */)
-{
- // Delete all memory-regulated objects
- GetRegulatorCleanup().CallClearProxiedObjects();
- Py_RETURN_NONE;
-}
diff --git a/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx b/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx
deleted file mode 100644
index 4502d826b2bab..0000000000000
--- a/bindings/pyroot/pythonizations/src/TMemoryRegulator.cxx
+++ /dev/null
@@ -1,108 +0,0 @@
-
-// Author: Enric Tejedor CERN 08/2019
-// Author: Vincenzo Eduardo Padulano CERN 05/2024
-
-/*************************************************************************
- * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. *
- * All rights reserved. *
- * *
- * For the licensing terms see $ROOTSYS/LICENSE. *
- * For the list of contributors see $ROOTSYS/README/CREDITS. *
- *************************************************************************/
-
-#include "TMemoryRegulator.h"
-
-#include "../../cppyy/CPyCppyy/src/ProxyWrappers.h"
-#include "../../cppyy/CPyCppyy/src/CPPInstance.h"
-
-////////////////////////////////////////////////////////////////////////////
-/// \brief Constructor. Registers the hooks to run on Cppyy's object
-/// construction and destruction
-PyROOT::TMemoryRegulator::TMemoryRegulator()
-{
- CPyCppyy::MemoryRegulator::SetRegisterHook(
- [this](Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) { return this->RegisterHook(cppobj, klass); });
- CPyCppyy::MemoryRegulator::SetUnregisterHook(
- [this](Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass) { return this->UnregisterHook(cppobj, klass); });
-}
-
-////////////////////////////////////////////////////////////////////////////
-/// \brief Register a hook that Cppyy runs when constructing an object.
-/// \param[in] cppobj Address of the object.
-/// \param[in] klass Class id of the object.
-/// \return Pair of two booleans. First indicates success, second tells
-/// Cppyy if we want to continue running RegisterPyObject
-std::pair<bool, bool> PyROOT::TMemoryRegulator::RegisterHook(Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass)
-{
- static Cppyy::TCppType_t tobjectTypeID = (Cppyy::TCppType_t)Cppyy::GetScope("TObject");
-
- if (Cppyy::IsSubtype(klass, tobjectTypeID)) {
- fObjectMap.insert({cppobj, klass});
- }
-
- return {true, true};
-}
-
-////////////////////////////////////////////////////////////////////////////
-/// \brief Register a hook that Cppyy runs when deleting an object.
-/// \param[in] cppobj Address of the object.
-/// \param[in] klass Class id of the object.
-/// \return Pair of two booleans. First indicates success, second tells
-/// Cppyy if we want to continue running UnRegisterPyObject
-std::pair<bool, bool> PyROOT::TMemoryRegulator::UnregisterHook(Cppyy::TCppObject_t cppobj, Cppyy::TCppType_t klass)
-{
-
- static Cppyy::TCppType_t tobjectTypeID = (Cppyy::TCppType_t)Cppyy::GetScope("TObject");
-
- if (Cppyy::IsSubtype(klass, tobjectTypeID)) {
- if (auto it = fObjectMap.find(cppobj); it != fObjectMap.end())
- fObjectMap.erase(it);
- }
-
- return {true, true};
-}
-
-////////////////////////////////////////////////////////////////////////////
-/// \brief Get the class id of the TObject being deleted and run Cppyy's
-/// RecursiveRemove.
-/// \param[in] object Object being destructed.
-void PyROOT::TMemoryRegulator::CallCppyyRecursiveRemove(TObject *object)
-{
- auto cppobj = reinterpret_cast<Cppyy::TCppObject_t>(object);
-
- if (auto it = fObjectMap.find(cppobj); it != fObjectMap.end()) {
- CPyCppyy::MemoryRegulator::RecursiveRemove(cppobj, it->second);
- fObjectMap.erase(it);
- }
-}
-
-////////////////////////////////////////////////////////////////////////////
-/// \brief Clean up all tracked objects.
-void PyROOT::TMemoryRegulator::ClearProxiedObjects()
-{
- while (!fObjectMap.empty()) {
- auto elem = fObjectMap.begin();
- auto cppobj = elem->first;
- auto klassid = elem->second;
- auto pyclass = CPyCppyy::CreateScopeProxy(klassid);
- auto pyobj = (CPyCppyy::CPPInstance *)CPyCppyy::MemoryRegulator::RetrievePyObject(cppobj, pyclass);
-
- if (pyobj && (pyobj->fFlags & CPyCppyy::CPPInstance::kIsOwner)) {
- // Only delete the C++ object if the Python proxy owns it.
- // If it is a value, cppyy deletes it in RecursiveRemove as part of
- // the proxy cleanup.
- auto o = static_cast<TObject *>(cppobj);
- bool isValue = pyobj->fFlags & CPyCppyy::CPPInstance::kIsValue;
- CallCppyyRecursiveRemove(o);
- if (!isValue)
- delete o;
- } else {
- // Non-owning proxy, just unregister to clean tables.
- // The proxy deletion by Python will have no effect on C++, so all good
- bool ret = CPyCppyy::MemoryRegulator::UnregisterPyObject(pyobj, pyclass);
- if (!ret) {
- fObjectMap.erase(elem);
- }
- }
- }
-}
diff --git a/bindings/pyroot/pythonizations/src/TMemoryRegulator.h b/bindings/pyroot/pythonizations/src/TMemoryRegulator.h
deleted file mode 100644
index c2d50f7a41149..0000000000000
--- a/bindings/pyroot/pythonizations/src/TMemoryRegulator.h
+++ /dev/null
@@ -1,96 +0,0 @@
-
-// Author: Enric Tejedor CERN 08/2019
-// Author: Vincenzo Eduardo Padulano CERN 05/2024
-
-/*************************************************************************
- * Copyright (C) 1995-2019, Rene Brun and Fons Rademakers. *
- * All rights reserved. *
- * *
- * For the licensing terms see $ROOTSYS/LICENSE. *
- * For the list of contributors see $ROOTSYS/README/CREDITS. *
- *************************************************************************/
-
-#ifndef PYROOT_TMEMORYREGULATOR_H
-#define PYROOT_TMEMORYREGULATOR_H
-
-//////////////////////////////////////////////////////////////////////////
-// //
-// TMemoryRegulator //
-// //
-// Sets hooks in Cppyy's MemoryRegulator to keep track of the TObjects //
-// that are constructed and destructed. For those objects, a map is //
-// filled, where the key is the address of the object and the value is //
-// the class to which the object belongs. //
-// //
-// The TMemoryRegulator object, created in PyROOTWrapper.cxx, is added //
-// to the list of cleanups and its RecursiveRemove method is called by //
-// ROOT to manage the memory of TObjects being deleted. //
-// In RecursiveRemove, the object being deleted is already a TNamed, so //
-// the information about its actual class is not available anymore. //
-// To solve the problem, the map above is used to know the class of the //
-// object, so that Cppyy's RecursiveRemove can be called passing the //
-// class as argument. //
-//////////////////////////////////////////////////////////////////////////
-
-// Bindings
-// CPyCppyy.h must be go first, since it includes Python.h, which must be
-// included before any standard header
-#include "../../cppyy/CPyCppyy/src/CPyCppyy.h"
-#include "../../cppyy/CPyCppyy/src/MemoryRegulator.h"
-
-// ROOT
-#include "TObject.h"
-
-// Stl
-#include <unordered_map>
-
-namespace PyROOT {
-
-class RegulatorCleanup;
-
-/// Manages TObject-derived objects created in a PyROOT application
-///
-/// This class is responsible to keep track of the creation of the objects
-/// that need further memory management within ROOT. The `ClearProxiedObjects`
-/// function is only called at PyROOT shutdown time. The `CallCppyyRecursiveRemove`
-/// is called as part of the global list of cleanups object destruction.
-///
-/// This class is intentionally not derived from TObject. See the
-/// `PyROOT::RegulatorCleanup` class for more info.
-///
-/// \note This class is not thread-safe on its own. We create one thread-local
-/// object in PyROOTWrapper.cxx.
-class TMemoryRegulator final {
- using ObjectMap_t = std::unordered_map<Cppyy::TCppObject_t, Cppyy::TCppType_t>;
-
- ObjectMap_t fObjectMap{}; // key: object address; value: object class id
-
- std::pair<bool, bool> RegisterHook(Cppyy::TCppObject_t, Cppyy::TCppType_t);
-
- std::pair<bool, bool> UnregisterHook(Cppyy::TCppObject_t, Cppyy::TCppType_t);
-
- void CallCppyyRecursiveRemove(TObject *object);
-
- void ClearProxiedObjects();
-
- TMemoryRegulator();
-
- friend class RegulatorCleanup;
-};
-
-/// A TObject-derived class to inject the memory regulation logic in the ROOT list of cleanups.
-///
-/// The purpose of this class is to keep the responsibilities separate between
-/// the TMemoryRegulator logic and the rest of ROOT.
-class RegulatorCleanup final : public TObject {
- TMemoryRegulator fRegulator{};
-
-public:
- void RecursiveRemove(TObject *object) final { fRegulator.CallCppyyRecursiveRemove(object); }
- void CallClearProxiedObjects() { fRegulator.ClearProxiedObjects(); }
- ClassDefInlineNV(RegulatorCleanup, 0);
-};
-
-} // namespace PyROOT
-
-#endif // !PYROOT_TMEMORYREGULATOR_H
diff --git a/bindings/pyroot/pythonizations/test/CMakeLists.txt b/bindings/pyroot/pythonizations/test/CMakeLists.txt
index 40bea60984b2d..5e4b82f70a949 100644
--- a/bindings/pyroot/pythonizations/test/CMakeLists.txt
+++ b/bindings/pyroot/pythonizations/test/CMakeLists.txt
@@ -138,24 +138,21 @@ endif()
ROOT_ADD_PYUNITTEST(pyroot_pyz_tf_pycallables tf_pycallables.py)
if(roofit)
- # RooAbsCollection and subclasses pythonizations
- if(NOT MSVC OR CMAKE_SIZEOF_VOID_P EQUAL 4 OR win_broken_tests)
- ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabscollection roofit/rooabscollection.py)
- endif()
- ROOT_ADD_PYUNITTEST(pyroot_roofit_rooarglist roofit/rooarglist.py)
-
- # RooDataHist pythonisations
- ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_ploton roofit/roodatahist_ploton.py)
- # RooDataSet pythonisations
- ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset roofit/roodataset.py)
-
- # RooWorkspace pythonizations
ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabspdf_fitto roofit/rooabspdf_fitto.py)
ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabsreal_ploton roofit/rooabsreal_ploton.py)
-
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_rooarglist roofit/rooarglist.py)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_roocmdarg roofit/roocmdarg.py)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_numpy roofit/roodatahist_numpy.py PYTHON_DEPS numpy)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_ploton roofit/roodatahist_ploton.py)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset roofit/roodataset.py)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset_numpy roofit/roodataset_numpy.py PYTHON_DEPS numpy)
ROOT_ADD_PYUNITTEST(pyroot_roofit_roolinkedlist roofit/roolinkedlist.py)
+ if(NOT MSVC OR CMAKE_SIZEOF_VOID_P EQUAL 4 OR win_broken_tests)
+ ROOT_ADD_PYUNITTEST(pyroot_roofit_rooabscollection roofit/rooabscollection.py)
+ endif()
+
if(NOT MSVC OR win_broken_tests)
# Test pythonizations for the RooFitHS3 package, which is not built on Windows.
ROOT_ADD_PYUNITTEST(pyroot_roofit_roojsonfactorywstool roofit/roojsonfactorywstool.py)
@@ -168,10 +165,6 @@ if(roofit)
ROOT_ADD_PYUNITTEST(pyroot_roofit_rooworkspace roofit/rooworkspace.py)
endif()
- # NumPy compatibility
- ROOT_ADD_PYUNITTEST(pyroot_roofit_roodataset_numpy roofit/roodataset_numpy.py PYTHON_DEPS numpy)
- ROOT_ADD_PYUNITTEST(pyroot_roofit_roodatahist_numpy roofit/roodatahist_numpy.py PYTHON_DEPS numpy)
-
endif()
if (dataframe)
@@ -192,3 +185,10 @@ ROOT_ADD_PYUNITTEST(pyroot_tcomplex tcomplex_operators.py)
# Tests with memory usage
ROOT_ADD_PYUNITTEST(pyroot_memory memory.py)
+
+# rbatchgenerator tests
+# TODO: We currently do not support TensorFlow for Python >= 3.12 (see requirements.txt)
+# Update here once that is fixed.
+if (NOT MSVC AND Python3_VERSION VERSION_LESS 3.12)
+ ROOT_ADD_PYUNITTEST(batchgen rbatchgenerator_completeness.py PYTHON_DEPS numpy tensorflow torch)
+endif()
diff --git a/bindings/pyroot/pythonizations/test/import_load_libs.py b/bindings/pyroot/pythonizations/test/import_load_libs.py
index c370a3cd42f2c..24050959a1ebb 100644
--- a/bindings/pyroot/pythonizations/test/import_load_libs.py
+++ b/bindings/pyroot/pythonizations/test/import_load_libs.py
@@ -40,6 +40,7 @@ class ImportLoadLibs(unittest.TestCase):
'libssl',
'libcrypt.*', # by libssl
'libtbb',
+ 'libtbb_debug',
'libtbbmalloc',
'liburing', # by libRIO if uring option is enabled
# On centos7 libssl links against kerberos pulling in all dependencies below, removed with libssl1.1.0
@@ -69,6 +70,7 @@ class ImportLoadLibs(unittest.TestCase):
'libnss_.*',
'ld.*',
'libffi',
+ 'libgcc_s',
# AddressSanitizer runtime and ROOT configuration
'libclang_rt.asan-.*',
'libROOTSanitizerConfig',
diff --git a/bindings/pyroot/pythonizations/test/memory.py b/bindings/pyroot/pythonizations/test/memory.py
index da5134616a803..d0b35588388d6 100644
--- a/bindings/pyroot/pythonizations/test/memory.py
+++ b/bindings/pyroot/pythonizations/test/memory.py
@@ -1,5 +1,6 @@
-import gc
import ROOT
+import gc
+import os
import unittest
@@ -43,6 +44,160 @@ class foo {
delta = after - before
self.assertLess(delta, 16)
+ def test_tstyle_memory_management(self):
+ """Regression test for https://github.com/root-project/root/issues/16918"""
+
+ h1 = ROOT.TH1F("h1", "", 100, 0, 10)
+
+ style = ROOT.TStyle("NewSTYLE", "")
+ groot = ROOT.ROOT.GetROOT()
+ groot.SetStyle(style.GetName())
+ groot.ForceStyle()
+
+ def test_tf2_memory_regulation(self):
+ """Regression test for https://github.com/root-project/root/issues/16942"""
+ # The test only checks that the memory regulation works correctly and
+ # that the application does not segfault
+ f2 = ROOT.TF2("f2", "sin(x)*sin(y)/x/y")
+
+ def test_tf3_memory_regulation(self):
+ """Make sure TF3 is properly managed by the memory regulation logic"""
+ # The test only checks that the memory regulation works correctly and
+ # that the application does not segfault
+ f3 = ROOT.TF3("f3", "[0] * sin(x) + [1] * cos(y) + [2] * z", 0, 10, 0, 10, 0, 10)
+
+ def test_tcolor_memory_regulation(self):
+ """Make sure TColor is properly managed by the memory regulation logic"""
+ # The test only checks that the memory regulation works correctly and
+ # that the application does not segfault
+ c = ROOT.TColor(42, 42, 42)
+
+ def test_ttree_clone_in_file_context(self):
+ """Test that CloneTree() doesn't give the ownership to Python when
+ TFile is opened."""
+
+ filename = "test_ttree_clone_in_file_context"
+
+ ttree = ROOT.TTree("tree", "tree")
+
+ with ROOT.TFile(filename, "RECREATE") as infile:
+ ttree_clone = ttree.CloneTree()
+
+ os.remove(filename)
+
+ def _check_object_in_subdir(self, klass, args):
+ """
+ Test that an object which automatically registers with a subdirectory
+ does not give ownership to Python
+ """
+ filename = "test_object_in_subdir.root"
+ try:
+ with ROOT.TFile(filename, "recreate") as f:
+ f.mkdir("subdir")
+ f.cd("subdir")
+
+ # Create object by calling the constructor
+ x = klass(*args)
+ x.Write()
+
+ # Create object by using the "virtual constructor" TObject::Clone()
+ x_clone = x.Clone()
+ x_clone.Write()
+ finally:
+ os.remove(filename)
+
+ def test_objects_ownership_with_subdir(self):
+ """
+ Test interaction of various types of objects with automatic directory
+ registration with a subdirectory of a TFile.
+ """
+
+ objs = {
+ "TH1D": ("h", "h", 10, 0, 10),
+ "TH1C": ("h", "h", 10, 0, 10),
+ "TH1S": ("h", "h", 10, 0, 10),
+ "TH1I": ("h", "h", 10, 0, 10),
+ "TH1L": ("h", "h", 10, 0, 10),
+ "TH1F": ("h", "h", 10, 0, 10),
+ "TH1D": ("h", "h", 10, 0, 10),
+ "TH1K": ("h", "h", 10, 0, 10),
+ "TProfile": ("h", "h", 10, 0, 10),
+ "TH2C": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2S": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2I": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2L": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2F": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2D": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2Poly": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH2PolyBin": tuple(),
+ "TProfile2D": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TProfile2PolyBin": tuple(),
+ "TProfile2Poly": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH3C": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TH3S": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TH3I": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TH3L": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TH3F": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TH3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TProfile3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TGraph2D": (100,),
+ "TEntryList": ("name", "title"),
+ "TEventList": ("name", "title"),
+ "TTree": ("name", "title"),
+ "TNtuple": ("name", "title", "x:y:z"),
+ }
+ for klass, args in objs.items():
+ with self.subTest(klass=klass):
+ self._check_object_in_subdir(getattr(ROOT, klass), args)
+
+ def _check_object_setdirectory(self, klass, classname, args):
+ """
+ Test that registering manually an object with a directory also triggers
+ a release of ownership from Python to C++.
+ """
+ f1 = ROOT.TMemFile(
+ "_check_object_setdirectory_in_memory_file_begin", "recreate")
+
+ x = klass(*args)
+ # TEfficiency does not automatically register with the directory
+ if not classname == "TEfficiency":
+ self.assertIs(x.GetDirectory(), f1)
+ x.SetDirectory(ROOT.nullptr)
+ self.assertFalse(x.GetDirectory())
+ # Make sure that at this point the ownership of the object is with Python
+ ROOT.SetOwnership(x, True)
+
+ f1.Close()
+
+ f2 = ROOT.TMemFile("_check_object_setdirectory_in_memory_file_end", "recreate")
+
+ # The pythonization should trigger the release of ownership to C++
+ x.SetDirectory(f2)
+ self.assertIs(x.GetDirectory(), f2)
+
+ f2.Close()
+
+ def test_objects_interaction_with_setdirectory(self):
+ """
+ Test interaction of various types of objects with manual registration
+ to a directory.
+ """
+
+ objs = {
+ "TH1D": ("h", "h", 10, 0, 10),
+ "TH2D": ("h", "h", 10, 0, 10, 10, 0, 10),
+ "TH3D": ("h", "h", 10, 0, 10, 10, 0, 10, 10, 0, 10),
+ "TGraph2D": (100,),
+ "TEfficiency": (ROOT.TH1D("h1", "h1", 10, 0, 10), ROOT.TH1D("h2", "h2", 10, 0, 10)),
+ "TEntryList": ("name", "title"),
+ "TEventList": ("name", "title"),
+ "TTree": ("name", "title"),
+ "TNtuple": ("name", "title", "x:y:z"),
+ }
+ for classname, args in objs.items():
+ with self.subTest(classname=classname):
+ self._check_object_setdirectory(getattr(ROOT, classname), classname, args)
+
if __name__ == '__main__':
unittest.main()
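The SetDirectory tests above encode a rule worth stating compactly. A hedged sketch of the expected ownership flow (the TMemFile names are illustrative):

    import ROOT

    f1 = ROOT.TMemFile("sketch1", "recreate")
    h = ROOT.TH1D("h", "h", 10, 0, 10)  # registers with f1; C++ owns it
    h.SetDirectory(ROOT.nullptr)        # detach from the file
    ROOT.SetOwnership(h, True)          # hand ownership back to Python
    f1.Close()

    f2 = ROOT.TMemFile("sketch2", "recreate")
    h.SetDirectory(f2)  # pythonized: ownership is released to C++ again
    f2.Close()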
diff --git a/bindings/pyroot/pythonizations/test/numbadeclare.py b/bindings/pyroot/pythonizations/test/numbadeclare.py
index 08dde20d26d82..0a380523f3ada 100644
--- a/bindings/pyroot/pythonizations/test/numbadeclare.py
+++ b/bindings/pyroot/pythonizations/test/numbadeclare.py
@@ -77,7 +77,7 @@ def fn1(x):
self.assertTrue(hasattr(fn1, "__cpp_wrapper__"))
self.assertTrue(type(fn1.__cpp_wrapper__) == str)
- self.assertEqual(sys.getrefcount(fn1.__cpp_wrapper__), 3)
+ self.assertLessEqual(sys.getrefcount(fn1.__cpp_wrapper__), 3)
self.assertTrue(hasattr(fn1, "__py_wrapper__"))
self.assertTrue(type(fn1.__py_wrapper__) == str)
diff --git a/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py b/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py
new file mode 100644
index 0000000000000..0d35e0ac01406
--- /dev/null
+++ b/bindings/pyroot/pythonizations/test/rbatchgenerator_completeness.py
@@ -0,0 +1,1000 @@
+import unittest
+import os
+import ROOT
+import numpy as np
+from random import randrange
+
+
+class RBatchGeneratorMultipleFiles(unittest.TestCase):
+
+ file_name1 = "first_half.root"
+ file_name2 = "second_half.root"
+ tree_name = "mytree"
+
+ # default constants
+ n_train_batch = 2
+ n_val_batch = 1
+ val_remainder = 1
+
+ # Helpers
+ def define_rdf(self, num_of_entries=10):
+ df = ROOT.RDataFrame(num_of_entries)\
+ .Define("b1", "(int) rdfentry_")\
+ .Define("b2", "(double) b1*b1")
+
+ return df
+
+ def create_file(self, num_of_entries=10):
+ self.define_rdf(num_of_entries).Snapshot(
+ self.tree_name, self.file_name1)
+
+ def create_5_entries_file(self):
+ df1 = ROOT.RDataFrame(5)\
+ .Define("b1", "(int) rdfentry_ + 10")\
+ .Define("b2", "(double) b1 * b1")\
+ .Snapshot(self.tree_name, self.file_name2)
+
+ def teardown_file(self, file):
+ os.remove(file)
+
+ def test01_each_element_is_generated_unshuffled(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
+ results_x_val = [3.0, 4.0, 8.0, 9.0]
+ results_y_train = [0.0, 1.0, 4.0, 25.0, 36.0, 49.0]
+ results_y_val = [9.0, 16.0, 64.0, 81.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ train_iter = iter(gen_train)
+ val_iter = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y = next(train_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y = next(val_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ x, y = next(val_iter)
+ self.assertTrue(x.shape == (self.val_remainder, 1))
+ self.assertTrue(y.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test02_each_element_is_generated_shuffled(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=True,
+ drop_remainder=False
+ )
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ train_iter = iter(gen_train)
+ val_iter = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y = next(train_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y = next(val_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ x, y = next(val_iter)
+ self.assertTrue(x.shape == (self.val_remainder, 1))
+ self.assertTrue(y.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = {
+ x for xl in collected_x_train for xs in xl for x in xs}
+ flat_x_val = {x for xl in collected_x_val for xs in xl for x in xs}
+ flat_y_train = {
+ y for yl in collected_y_train for ys in yl for y in ys}
+ flat_y_val = {y for yl in collected_y_val for ys in yl for y in ys}
+
+ self.assertEqual(len(flat_x_train), 6)
+ self.assertEqual(len(flat_x_val), 4)
+ self.assertEqual(len(flat_y_train), 6)
+ self.assertEqual(len(flat_y_val), 4)
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test03_chunk_input_smaller_than_batch_size(self):
+ """Checking for the situation when the batch can only be created after
+ more than two chunks. If not, segmentation fault will arise"""
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=3,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ next(iter(gen_train))
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test04_dropping_remainder(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=True
+ )
+
+ collected_x = []
+ collected_y = []
+
+ for x, y in gen_train:
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x.append(x)
+ collected_y.append(y)
+
+ for x, y in gen_validation:
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x.append(x)
+ collected_y.append(y)
+
+ self.assertEqual(len(collected_x), 3)
+ self.assertEqual(len(collected_y), 3)
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test05_more_than_one_file(self):
+ self.create_file()
+ self.create_5_entries_file()
+
+ try:
+ df = ROOT.RDataFrame(
+ self.tree_name, [self.file_name1, self.file_name2])
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0, 10.0, 11.0, 12.0]
+ results_x_val = [3.0, 4.0, 8.0, 9.0, 13.0, 14.0]
+ results_y_train = [0.0, 1.0, 4.0, 25.0,
+ 36.0, 49.0, 100.0, 121.0, 144.0]
+ results_y_val = [9.0, 16.0, 64.0, 81.0, 169.0, 196.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ for x, y in gen_train:
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ for x, y in gen_validation:
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+
+ self.teardown_file(self.file_name1)
+ self.teardown_file(self.file_name2)
+
+ except:
+ self.teardown_file(self.file_name1)
+ self.teardown_file(self.file_name2)
+ raise
+
+ def test06_multiple_target_columns(self):
+ file_name = "multiple_target_columns.root"
+
+ ROOT.RDataFrame(10)\
+ .Define("b1", "(Short_t) rdfentry_")\
+ .Define("b2", "(UShort_t) b1 * b1")\
+ .Define("b3", "(double) rdfentry_ * 10")\
+ .Define("b4", "(double) b3 * 10")\
+ .Snapshot("myTree", file_name)
+ try:
+ df = ROOT.RDataFrame("myTree", file_name)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target=["b2", "b4"],
+ weights="b3",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
+ results_x_val = [3.0, 4.0, 8.0, 9.0]
+ results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0,
+ 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0]
+ results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0, 800.0, 81.0, 900.0]
+ results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0]
+ results_z_val = [30.0, 40.0, 80.0, 90.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+ collected_z_train = []
+ collected_z_val = []
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y, z = next(iter_train)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+ collected_z_train.append(z.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+ collected_z_val.append(z.tolist())
+
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (self.val_remainder, 1))
+ self.assertTrue(y.shape == (self.val_remainder, 2))
+ self.assertTrue(z.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+ collected_z_val.append(z.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+ flat_z_train = [
+ z for zl in collected_z_train for zs in zl for z in zs]
+ flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+ self.assertEqual(results_z_train, flat_z_train)
+ self.assertEqual(results_z_val, flat_z_val)
+
+ self.teardown_file(file_name)
+
+ except:
+ self.teardown_file(file_name)
+ raise
+
+ def test07_multiple_input_columns(self):
+ file_name = "multiple_input_columns.root"
+
+ ROOT.RDataFrame(10)\
+ .Define("b1", "(Short_t) rdfentry_")\
+ .Define("b2", "(UShort_t) b1 * b1")\
+ .Define("b3", "(double) rdfentry_ * 10")\
+ .Snapshot("myTree", file_name)
+
+ try:
+ df = ROOT.RDataFrame("myTree", file_name)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 0.0, 1.0, 10.0, 2.0,
+ 20.0, 5.0, 50.0, 6.0, 60.0, 7.0, 70.0]
+ results_x_val = [3.0, 30.0, 4.0, 40.0, 8.0, 80.0, 9.0, 90.0]
+ results_y_train = [0.0, 1.0, 4.0, 25.0, 36.0, 49.]
+ results_y_val = [9.0, 16.0, 64.0, 81.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y = next(iter_train)
+ self.assertTrue(x.shape == (3, 2))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y = next(iter_val)
+ self.assertTrue(x.shape == (3, 2))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ x, y = next(iter_val)
+ self.assertTrue(x.shape == (self.val_remainder, 2))
+ self.assertTrue(y.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+
+ self.teardown_file(file_name)
+
+ except:
+ self.teardown_file(file_name)
+ raise
+
+ def test08_filtered(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ dff = df.Filter("b1 % 2 == 0", "name")
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ dff,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 2.0, 4.0]
+ results_x_val = [6.0, 8.0]
+ results_y_train = [0.0, 4.0, 16.0]
+ results_y_val = [36.0, 64.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ train_iter = iter(gen_train)
+ val_iter = iter(gen_validation)
+
+ x, y = next(train_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ x, y = next(val_iter)
+ self.assertTrue(x.shape == (2, 1))
+ self.assertTrue(y.shape == (2, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test09_filtered_last_chunk(self):
+ file_name = "filtered_last_chunk.root"
+ tree_name = "myTree"
+
+ ROOT.RDataFrame(20)\
+ .Define("b1", "(Short_t) rdfentry_")\
+ .Define("b2", "(UShort_t) b1 * b1")\
+ .Snapshot(tree_name, file_name)
+
+ try:
+ df = ROOT.RDataFrame(tree_name, file_name)
+
+ dff = df.Filter("b1 % 2 == 0", "name")
+
+ gen_train, _ = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ dff,
+ batch_size=3,
+ chunk_size=9,
+ target="b2",
+ validation_split=0,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 2.0, 4.0, 6.0,
+ 8.0, 10.0, 12.0, 14.0, 16.0, 18.0]
+ results_y_train = [0.0, 4.0, 16.0, 36.0,
+ 64.0, 100.0, 144.0, 196.0, 256.0, 324.0]
+
+ collected_x_train = []
+ collected_y_train = []
+
+ train_iter = iter(gen_train)
+
+ for _ in range(3):
+ x, y = next(train_iter)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ x, y = next(train_iter)
+ self.assertTrue(x.shape == (1, 1))
+ self.assertTrue(y.shape == (1, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_y_train, flat_y_train)
+
+ self.teardown_file(file_name)
+
+ except:
+ self.teardown_file(file_name)
+ raise
+
+ def test10_two_epochs_shuffled(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=True,
+ drop_remainder=False
+ )
+
+ both_epochs_collected_x_val = []
+ both_epochs_collected_y_val = []
+
+ for _ in range(2):
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y = next(iter_train)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y = next(iter_val)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ x, y = next(iter_val)
+ self.assertTrue(x.shape == (self.val_remainder, 1))
+ self.assertTrue(y.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+
+ flat_x_train = {
+ x for xl in collected_x_train for xs in xl for x in xs}
+ flat_x_val = {
+ x for xl in collected_x_val for xs in xl for x in xs}
+ flat_y_train = {
+ y for yl in collected_y_train for ys in yl for y in ys}
+ flat_y_val = {
+ y for yl in collected_y_val for ys in yl for y in ys}
+
+ self.assertEqual(len(flat_x_train), 6)
+ self.assertEqual(len(flat_x_val), 4)
+ self.assertEqual(len(flat_y_train), 6)
+ self.assertEqual(len(flat_y_val), 4)
+
+ both_epochs_collected_x_val.append(collected_x_val)
+ both_epochs_collected_y_val.append(collected_y_val)
+
+ self.assertEqual(
+ both_epochs_collected_x_val[0], both_epochs_collected_x_val[1])
+ self.assertEqual(
+ both_epochs_collected_y_val[0], both_epochs_collected_y_val[1])
+ finally:
+ self.teardown_file(self.file_name1)
+
+ def test11_number_of_training_and_validation_batches_remainder(self):
+ self.create_file()
+
+ try:
+ df = ROOT.RDataFrame(self.tree_name, self.file_name1)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target="b2",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ number_of_training_batches = 0
+ number_of_validation_batches = 0
+
+ for _ in gen_train:
+ number_of_training_batches += 1
+
+ for _ in gen_validation:
+ number_of_validation_batches += 1
+
+ self.assertEqual(gen_train.number_of_batches,
+ number_of_training_batches)
+ self.assertEqual(gen_validation.number_of_batches,
+ number_of_validation_batches)
+ self.assertEqual(gen_train.last_batch_no_of_rows, 0)
+ self.assertEqual(gen_validation.last_batch_no_of_rows, 1)
+
+ self.teardown_file(self.file_name1)
+
+ except:
+ self.teardown_file(self.file_name1)
+ raise
+
+ def test12_PyTorch(self):
+ import torch
+
+ file_name = "multiple_target_columns.root"
+
+ ROOT.RDataFrame(10)\
+ .Define("b1", "(Short_t) rdfentry_")\
+ .Define("b2", "(UShort_t) b1 * b1")\
+ .Define("b3", "(double) rdfentry_ * 10")\
+ .Define("b4", "(double) b3 * 10")\
+ .Snapshot("myTree", file_name)
+
+ try:
+ df = ROOT.RDataFrame("myTree", file_name)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreatePyTorchGenerators(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target=["b2", "b4"],
+ weights="b3",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
+ results_x_val = [3.0, 4.0, 8.0, 9.0]
+ results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0,
+ 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0]
+ results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0, 800.0, 81.0, 900.0]
+ results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0]
+ results_z_val = [30.0, 40.0, 80.0, 90.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+ collected_z_train = []
+ collected_z_val = []
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y, z = next(iter_train)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_train.append(x.tolist())
+ collected_y_train.append(y.tolist())
+ collected_z_train.append(z.tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+ collected_z_val.append(z.tolist())
+
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (self.val_remainder, 1))
+ self.assertTrue(y.shape == (self.val_remainder, 2))
+ self.assertTrue(z.shape == (self.val_remainder, 1))
+ collected_x_val.append(x.tolist())
+ collected_y_val.append(y.tolist())
+ collected_z_val.append(z.tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+ flat_z_train = [
+ z for zl in collected_z_train for zs in zl for z in zs]
+ flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+ self.assertEqual(results_z_train, flat_z_train)
+ self.assertEqual(results_z_val, flat_z_val)
+
+ self.teardown_file(file_name)
+
+ except:
+ self.teardown_file(file_name)
+ raise
+
+ def test13_TensorFlow(self):
+ import tensorflow as tf
+
+ file_name = "multiple_target_columns.root"
+
+ ROOT.RDataFrame(10)\
+ .Define("b1", "(Short_t) rdfentry_")\
+ .Define("b2", "(UShort_t) b1 * b1")\
+ .Define("b3", "(double) rdfentry_ * 10")\
+ .Define("b4", "(double) b3 * 10")\
+ .Snapshot("myTree", file_name)
+
+ try:
+ df = ROOT.RDataFrame("myTree", file_name)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateTFDatasets(
+ df,
+ batch_size=3,
+ chunk_size=5,
+ target=["b2", "b4"],
+ weights="b3",
+ validation_split=0.4,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ results_x_train = [0.0, 1.0, 2.0, 5.0, 6.0, 7.0]
+ results_x_val = [3.0, 4.0, 8.0, 9.0, 0.0, 0.0]
+ results_y_train = [0.0, 0.0, 1.0, 100.0, 4.0,
+ 200.0, 25.0, 500.0, 36.0, 600.0, 49.0, 700.0]
+ results_y_val = [9.0, 300.0, 16.0, 400.0, 64.0,
+ 800.0, 81.0, 900.0, 0.0, 0.0, 0.0, 0.0]
+ results_z_train = [0.0, 10.0, 20.0, 50.0, 60.0, 70.0]
+ results_z_val = [30.0, 40.0, 80.0, 90.0, 0.0, 0.0]
+
+ collected_x_train = []
+ collected_x_val = []
+ collected_y_train = []
+ collected_y_val = []
+ collected_z_train = []
+ collected_z_val = []
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for _ in range(self.n_train_batch):
+ x, y, z = next(iter_train)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_train.append(x.numpy().tolist())
+ collected_y_train.append(y.numpy().tolist())
+ collected_z_train.append(z.numpy().tolist())
+
+ for _ in range(self.n_val_batch):
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_val.append(x.numpy().tolist())
+ collected_y_val.append(y.numpy().tolist())
+ collected_z_val.append(z.numpy().tolist())
+
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (3, 1))
+ self.assertTrue(y.shape == (3, 2))
+ self.assertTrue(z.shape == (3, 1))
+ collected_x_val.append(x.numpy().tolist())
+ collected_y_val.append(y.numpy().tolist())
+ collected_z_val.append(z.numpy().tolist())
+
+ flat_x_train = [
+ x for xl in collected_x_train for xs in xl for x in xs]
+ flat_x_val = [x for xl in collected_x_val for xs in xl for x in xs]
+ flat_y_train = [
+ y for yl in collected_y_train for ys in yl for y in ys]
+ flat_y_val = [y for yl in collected_y_val for ys in yl for y in ys]
+ flat_z_train = [
+ z for zl in collected_z_train for zs in zl for z in zs]
+ flat_z_val = [z for zl in collected_z_val for zs in zl for z in zs]
+
+ self.assertEqual(results_x_train, flat_x_train)
+ self.assertEqual(results_x_val, flat_x_val)
+ self.assertEqual(results_y_train, flat_y_train)
+ self.assertEqual(results_y_val, flat_y_val)
+ self.assertEqual(results_z_train, flat_z_train)
+ self.assertEqual(results_z_val, flat_z_val)
+
+ self.teardown_file(file_name)
+
+ except:
+ self.teardown_file(file_name)
+ raise
+
+ def test14_big_data(self):
+ file_name = "big_data.root"
+ tree_name = "myTree"
+
+ entries_in_rdf = randrange(10000, 30000)
+ chunk_size = randrange(1000, 3001)
+ batch_size = randrange(100, 501)
+
+ error_message = f"\n Batch size: {batch_size} Chunk size: {chunk_size}\
+ Number of entries: {entries_in_rdf}"
+
+ def define_rdf(num_of_entries):
+ ROOT.RDataFrame(num_of_entries)\
+ .Define("b1", "(int) rdfentry_")\
+ .Define("b2", "(double) rdfentry_ * 2")\
+ .Define("b3", "(int) rdfentry_ + 10192")\
+ .Define("b4", "(int) -rdfentry_")\
+ .Define("b5", "(double) -rdfentry_ - 10192")\
+ .Snapshot(tree_name, file_name)
+
+ def test(size_of_batch, size_of_chunk, num_of_entries):
+ define_rdf(num_of_entries)
+
+ try:
+ df = ROOT.RDataFrame(tree_name, file_name)
+
+ gen_train, gen_validation = ROOT.TMVA.Experimental.CreateNumPyGenerators(
+ df,
+ batch_size=size_of_batch,
+ chunk_size=size_of_chunk,
+ target=["b3", "b5"],
+ weights="b2",
+ validation_split=0.3,
+ shuffle=False,
+ drop_remainder=False
+ )
+
+ collect_x = []
+
+ train_remainder = gen_train.last_batch_no_of_rows
+ val_remainder = gen_validation.last_batch_no_of_rows
+
+ n_train_batches = (gen_train.number_of_batches - 1
+                    if train_remainder else gen_train.number_of_batches)
+ n_val_batches = (gen_validation.number_of_batches - 1
+                  if val_remainder else gen_validation.number_of_batches)
+
+ iter_train = iter(gen_train)
+ iter_val = iter(gen_validation)
+
+ for i in range(n_train_batches):
+ x, y, z = next(iter_train)
+
+ self.assertTrue(x.shape == (size_of_batch, 2),
+ error_message + f" row: {i} x shape: {x.shape}")
+ self.assertTrue(y.shape == (size_of_batch, 2),
+ error_message + f" row: {i} y shape: {y.shape}")
+ self.assertTrue(z.shape == (size_of_batch, 1),
+ error_message + f" row: {i} z shape: {z.shape}")
+
+ self.assertTrue(
+ np.all(x[:, 0]*(-1) == x[:, 1]), error_message + f" row: {i}")
+ self.assertTrue(
+ np.all(x[:, 0]+10192 == y[:, 0]), error_message + f" row: {i}")
+ # self.assertTrue(np.all(x[:,0]*(-1)-10192==y[:,1]), error_message)
+ self.assertTrue(
+ np.all(x[:, 0]*2 == z[:, 0]), error_message + f" row: {i}")
+
+ collect_x.extend(list(x[:, 0]))
+
+ if train_remainder:
+ x, y, z = next(iter_train)
+ self.assertTrue(x.shape == (
+ train_remainder, 2), error_message)
+ self.assertTrue(y.shape == (
+ train_remainder, 2), error_message)
+ self.assertTrue(z.shape == (
+ train_remainder, 1), error_message)
+ collect_x.extend(list(x[:, 0]))
+
+ for _ in range(n_val_batches):
+ x, y, z = next(iter_val)
+
+ self.assertTrue(x.shape == (size_of_batch, 2),
+ error_message + f" row: {i} x shape: {x.shape}")
+ self.assertTrue(y.shape == (size_of_batch, 2),
+ error_message + f" row: {i} y shape: {y.shape}")
+ self.assertTrue(z.shape == (size_of_batch, 1),
+ error_message + f" row: {i} z shape: {z.shape}")
+
+ self.assertTrue(
+ np.all(x[:, 0]*(-1) == x[:, 1]), error_message)
+ self.assertTrue(
+ np.all(x[:, 0]+10192 == y[:, 0]), error_message)
+ # self.assertTrue(np.all(x[:,0]*(-1)-10192==y[:,1]), error_message)
+ self.assertTrue(
+ np.all(x[:, 0]*2 == z[:, 0]), error_message)
+
+ collect_x.extend(list(x[:, 0]))
+
+ if val_remainder:
+ x, y, z = next(iter_val)
+ self.assertTrue(x.shape == (
+ val_remainder, 2), error_message)
+ self.assertTrue(y.shape == (
+ val_remainder, 2), error_message)
+ self.assertTrue(z.shape == (
+ val_remainder, 1), error_message)
+ collect_x.extend(list(x[:, 0]))
+
+ self.assertTrue(set(collect_x) == set(i for i in range(num_of_entries)), f"collected length: {len(set(collect_x))}\
+ generated length {len(set(i for i in range(num_of_entries)))}")
+
+ finally:
+ # Ensure the file is removed on success as well as on failure
+ self.teardown_file(file_name)
+
+ test(batch_size, chunk_size, entries_in_rdf)
+
+
+if __name__ == '__main__':
+ unittest.main()
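For context, a hedged sketch of how the PyTorch generators from test12 would feed an actual training loop; the model, optimizer, and loss are placeholders, not part of this patch:

    import torch

    model = torch.nn.Linear(1, 2)     # b1 -> (b2, b4), shapes as in test12
    opt = torch.optim.SGD(model.parameters(), lr=1e-3)
    loss_fn = torch.nn.MSELoss()

    for epoch in range(2):
        for x, y, w in gen_train:     # generators are re-iterable per epoch
            opt.zero_grad()
            loss = loss_fn(model(x.float()), y.float())
            loss.backward()
            opt.step()
        for x, y, w in gen_validation:
            pass  # validation batches become available during training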
diff --git a/bindings/pyroot/pythonizations/test/rdf_define_pyz.py b/bindings/pyroot/pythonizations/test/rdf_define_pyz.py
index f67d469f1f7da..3478fdc05074c 100644
--- a/bindings/pyroot/pythonizations/test/rdf_define_pyz.py
+++ b/bindings/pyroot/pythonizations/test/rdf_define_pyz.py
@@ -7,70 +7,6 @@ class PyDefine(unittest.TestCase):
Testing Pythonized Define of RDF
"""
- def test_with_dtypes(self):
- """
- Tests the pythonized define with all the numba declare datatypes and
- """
- numba_declare_dtypes = ['float', 'double', 'int', 'unsigned int', 'long', 'unsigned long', 'bool']
- rdf = ROOT.RDataFrame(10)
- for type in numba_declare_dtypes:
- col_name = "col_" + type.replace(" ","")
- rdf = rdf.Define(col_name, f"({type}) rdfentry_")
- rdf = rdf.Define(col_name + "_arr", lambda col: np.array([col,col]), [col_name])
- arr = np.arange(0, 10)
- if type == 'bool':
- arr = np.array(arr, dtype='bool')
- flag1 = np.array_equal(rdf.AsNumpy()[col_name], arr)
- flag2 = True
- for idx, entry in enumerate(rdf.AsNumpy()[col_name + "_arr"]):
- if not (entry[0] == arr[idx] and entry[1] == arr[idx]):
- flag2 = False
- self.assertTrue(flag1 and flag2)
-
- def test_define_overload1(self):
- rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_")
- rdf = rdf.Define("x2", lambda y: y*y, ["x"])
- arr = np.arange(0, 10)
- flag = np.array_equal(rdf.AsNumpy()["x2"], arr*arr)
- self.assertTrue(flag)
-
- def test_define_overload2(self):
- rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_")
- rdf = rdf.Define("x2", lambda x: x*x)
- arr = np.arange(0, 10)
- flag = np.array_equal(rdf.AsNumpy()["x2"], arr*arr)
- self.assertTrue(flag)
-
- def test_define_extra_args(self):
- rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_")
- def x_y(x, y):
- return x*y
- rdf = rdf.Define("x_y", x_y , extra_args = {"y": 0.5})
- arr = np.arange(0, 10)
- flag = np.array_equal(rdf.AsNumpy()["x_y"], arr*0.5)
- self.assertTrue(flag)
-
- def test_capture_from_scope(self):
- rdf = ROOT.RDataFrame(10).Define("x", "rdfentry_")
- y = 0.5
- def x_times_y(x):
- return x*y
- rdf = rdf.Define("x_y", x_times_y )
- arr = np.arange(0, 10)
- flag = np.array_equal(rdf.AsNumpy()["x_y"], arr*0.5)
- self.assertTrue(flag)
-
- def test_arrays(self):
- rdf = ROOT.RDataFrame(5).Define("x", "rdfentry_")
- rdf = rdf.Define("x_arr", lambda x: np.array([x, x]))
- def norm(x_arr):
- return np.sqrt(x_arr[0]**2 + x_arr[1]**2)
- rdf = rdf.Define("mag", norm)
- arr = np.arange(0, 5)
- arr = np.sqrt(arr*arr + arr*arr )
- flag = np.array_equal(rdf.AsNumpy()["mag"], arr)
- self.assertTrue(flag)
-
def test_cpp_functor(self):
"""
Test that a C++ functor can be passed as a callable argument of a
diff --git a/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py b/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py
index ea9daefa56562..9cdc9df28ebb8 100755
--- a/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py
+++ b/bindings/pyroot/pythonizations/test/rdf_filter_pyz.py
@@ -3,90 +3,12 @@
import numpy as np
import os
-from rdf_filter_pyz_helper import CreateData, TYPE_TO_SYMBOL, filter_dict
class PyFilter(unittest.TestCase):
"""
Testing Pythonized Filters of RDF
"""
- def test_with_dtypes(self):
- """
- Tests the pythonized filter with all the tree datatypes and
- """
- CreateData()
- rdf = ROOT.RDataFrame("TestData", "./RDF_Filter_Pyz_TestData.root")
- test_cols =[str(c) for c in rdf.GetColumnNames()]
- for col_name in test_cols:
- func = filter_dict[TYPE_TO_SYMBOL[col_name]] # filter function
- x = rdf.Mean(col_name).GetValue()
- if col_name == 'Bool_t': x = True
- filtered = rdf.Filter(func, extra_args = {'x':x})
- res_root = filtered.AsNumpy()[col_name]
- if not isinstance(x, bool):
- filtered2 = rdf.Filter(f"{col_name} > {x}")
- else:
- if x:
- filtered2 = rdf.Filter(f"{col_name} == true")
- else:
- filtered2 = rdf.Filter(f"{col_name} == false")
- res_root2 = filtered2.AsNumpy()[col_name]
- self.assertTrue(np.array_equal(res_root,res_root2))
-
- os.remove("./RDF_Filter_Pyz_TestData.root")
-
- # CPP Overload 1: Filter(callable, col_list = [], name = "") => 3 Possibilities
- def test_filter_overload1_a(self):
- """
- Test to verify the first overload (1.a) of filter
- Filter(callable, col_list, name)
- """
- rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_")
- def x_greater_than_2(x):
- return x>2
- fil1 = rdf.Filter(x_greater_than_2, ["x"], "x is more than 2")
- self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4])))
-
- def test_filter_overload1_b(self):
- """
- Test to verify the first overload (1.b) of filter
- Filter(callable, col_list)
- """
- rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_")
- fil1 = rdf.Filter(lambda x: x>2, ["x"])
- self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4])))
-
- def test_filter_overload1_c(self):
- """
- Test to verify the first overload (1.c) of filter
- Filter(callable)
- """
- rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_")
- def x_greater_than_2(x):
- return x>2
- fil1 = rdf.Filter(x_greater_than_2)
- self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4])))
-
- # CPP Overload 3: Filter(callable, name)
- def test_filter_overload3(self):
- """
- Test to verify the third overload of filter
- Filter(callable, name)
- """
- rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_")
- def x_greater_than_2(x):
- return x>2
- fil1 = rdf.Filter(x_greater_than_2, "x is greater than 2")
- self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4])))
-
- def test_capture_from_scope(self):
- rdf = ROOT.RDataFrame(5).Define("x", "(double) rdfentry_")
- y = 2
- def x_greater_than_y(x):
- return x > y
- fil1 = rdf.Filter(x_greater_than_y, "x is greater than 2")
- self.assertTrue(np.array_equal(fil1.AsNumpy()["x"], np.array([3, 4])))
-
def test_cpp_functor(self):
"""
Test that a C++ functor can be passed as a callable argument of a
diff --git a/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py b/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py
deleted file mode 100755
index 367fbd963d131..0000000000000
--- a/bindings/pyroot/pythonizations/test/rdf_filter_pyz_helper.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import ROOT
-import numpy as np
-
-def CreateData():
- """
- This function generates the root files of various datatypes with random values to test them.
- Datatypes could be generated are Strings, Char_t, UChar_t
- """
- # function to create random numbers.. gRandom did not give me signed integers
- @ROOT.Numba.Declare(['int', 'bool'], 'long')
- def random_long(bits, signed):
- if signed:
- low = -1*2**(bits - 1)
- high = 2**(bits - 1) -1
- else:
- low = 0
- high = 2**bits
- return np.random.randint(low, high)
-
- N = 100 # df with 100 entries
- df = ROOT.RDataFrame(N)
-
- col_name = "Short_t"
- df = df.Define(col_name, f"({col_name}) Numba::random_long(16, true)")
-
- col_name = "UShort_t"
- df = df.Define(col_name, f"({col_name}) Numba::random_long(16, false)")
-
- col_name = "Int_t"
- df = df.Define(col_name, f"({col_name}) Numba::random_long(32, true)")
-
- col_name = "UInt_t"
- df = df.Define(col_name, f"({col_name}) Numba::random_long(32, false)")
-
- col_name = "Float_t"
- df = df.Define(col_name, f"({col_name}) gRandom->Gaus()")
-
- col_name = "Float16_t"
- df = df.Define(col_name, f"({col_name}) gRandom->Gaus()")
-
- col_name = "Double_t"
- df = df.Define(col_name, f"({col_name}) gRandom->Gaus()")
-
- col_name = "Double32_t"
- df = df.Define(col_name, f"({col_name}) gRandom->Gaus()")
-
- col_name = "Long64_t"
- df = df.Define(col_name, f"({col_name}) rdfentry_")
-
- col_name = "ULong64_t"
- df = df.Define(col_name, f"({col_name}) rdfentry_")
-
- col_name = "Long_t"
- df = df.Define(col_name, f"({col_name}) rdfentry_")
-
- col_name = "ULong_t"
- df = df.Define(col_name, f"({col_name}) rdfentry_")
-
- col_name = "Bool_t"
- df = df.Define(col_name, f"({col_name}) gRandom->Integer(2)")
-
- df.Snapshot("TestData", "./RDF_Filter_Pyz_TestData.root")
-
-def filter_general(col, x):
- return bool(col > x)
-
-def filter_C(String, x):
- pass
-
-def filter_B(Char_t, x):
- return bool(Char_t > x)
-
-def filter_b(UChar_t, x):
- return bool(UChar_t > x)
-
-def filter_S(Short_t, x):
- return bool(Short_t > x)
-
-def filter_s(UShort_t, x):
- return bool(UShort_t > x)
-
-def filter_I(Int_t, x):
- return bool(Int_t > x)
-
-def filter_i(UInt_t, x):
- return bool(UInt_t > x)
-
-def filter_F(Float_t, x):
- return bool(Float_t > x)
-
-def filter_f(Float16_t, x):
- return bool(Float16_t > x)
-
-def filter_D(Double_t, x):
- return bool(Double_t > x)
-
-def filter_d(Double32_t, x):
- return bool(Double32_t > x)
-
-def filter_L(Long64_t, x):
- return bool(Long64_t > x)
-
-def filter_l(ULong64_t, x):
- return bool(ULong64_t > x)
-
-def filter_G(Long_t, x):
- return bool(Long_t > x)
-
-def filter_g(ULong_t, x):
- return bool(ULong_t > x)
-
-def filter_O(Bool_t, x):
- return bool(x == Bool_t)
-
-TREE_TYPES = ["String","Char_t", "UChar_t", "Short_t", "UShort_t", "Int_t", "UInt_t", "Float_t", "Float16_t", "Double_t", "Double32_t", "Long64_t", "ULong64_t", "Long_t", "ULong_t", "Bool_t"]
-TREE_SYMS = ['C', 'B', 'b', 'S', 's', 'I', 'i', 'F', 'f', 'D', 'd', 'L', 'l', 'G', 'g', 'O'] # 16 Data Types
-TYPE_TO_SYMBOL = dict(zip(TREE_TYPES, TREE_SYMS))
-
-filter_dict = {}
-for i in TREE_SYMS:
- filter_dict[i] = eval("filter_" + i)
diff --git a/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py b/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py
new file mode 100644
index 0000000000000..b5dbe9a45111d
--- /dev/null
+++ b/bindings/pyroot/pythonizations/test/roofit/roocmdarg.py
@@ -0,0 +1,83 @@
+import unittest
+
+import ROOT
+
+# Necessary inside the "eval" call
+RooArgSet = ROOT.RooArgSet
+RooCmdArg = ROOT.RooCmdArg
+
+x = ROOT.RooRealVar("x", "x", 1.0)
+y = ROOT.RooRealVar("y", "y", 2.0)
+z = ROOT.RooRealVar("z", "z", 3.0)
+
+
+def args_equal(arg_1, arg_2):
+ same = True
+
+ same &= str(arg_1.GetName()) == str(arg_2.GetName())
+ same &= str(arg_1.GetTitle()) == str(arg_2.GetTitle())
+
+ for i in range(2):
+ same &= arg_1.getInt(i) == arg_2.getInt(i)
+
+ for i in range(2):
+ same &= arg_1.getDouble(i) == arg_2.getDouble(i)
+
+ for i in range(3):
+ same &= str(arg_1.getString(i)) == str(arg_2.getString(i))
+
+ same &= arg_1.procSubArgs() == arg_2.procSubArgs()
+ same &= arg_1.prefixSubArgs() == arg_2.prefixSubArgs()
+
+ for i in range(2):
+ same &= arg_1.getObject(i) == arg_2.getObject(i)
+
+ def set_equal(set_1, set_2):
+ if set_1 == ROOT.nullptr and set_2 == ROOT.nullptr:
+ return True
+ if set_1 == ROOT.nullptr and set_2 != ROOT.nullptr:
+ return False
+ if set_1 != ROOT.nullptr and set_2 == ROOT.nullptr:
+ return False
+
+ if set_1.size() != set_2.size():
+ return False
+
+ return set_2.hasSameLayout(set_1)
+
+ for i in range(2):
+ same &= set_equal(arg_1.getSet(i), arg_2.getSet(i))
+
+ return same
+
+
+class TestRooCmdArg(unittest.TestCase):
+ """
+ Test for RooCmdArg pythonizations.
+ """
+
+ def test_constructor_eval(self):
+
+ set_1 = ROOT.RooArgSet(x, y)
+ set_2 = ROOT.RooArgSet(y, z)
+
+ def do_test(*args):
+ arg_1 = ROOT.RooCmdArg(*args)
+
+ # The arg should be able to recreate itself by emitting the right
+ # constructor code:
+ arg_2 = eval(arg_1.constructorCode())
+
+ self.assertTrue(args_equal(arg_1, arg_2))
+
+ nullp = ROOT.nullptr
+
+ # only fill the non-object fields:
+ do_test("Test", -1, 3, 4.2, 4.7, "hello", "world", nullp, nullp, nullp, "s3", nullp, nullp)
+
+ # RooArgSet tests:
+ do_test("Test", -1, 3, 4.2, 4.7, "hello", "world", nullp, nullp, nullp, "s3", set_1, set_2)
+
+
+if __name__ == "__main__":
+ unittest.main()
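For reference, a minimal PyROOT sketch of the round trip this new test exercises (assuming a ROOT build with RooFit; the constructor arguments mirror the do_test calls above, and RooCmdArg/RooArgSet must be bound to those names so eval can resolve the emitted expression):

import ROOT

# needed so that eval() can resolve the emitted constructor expression
RooCmdArg = ROOT.RooCmdArg
RooArgSet = ROOT.RooArgSet

nullp = ROOT.nullptr
arg = ROOT.RooCmdArg("Test", -1, 3, 4.2, 4.7, "hello", "world",
                     nullp, nullp, nullp, "s3", nullp, nullp)

# constructorCode() returns a RooCmdArg(...) expression as a string;
# evaluating it should reproduce an equivalent argument object
clone = eval(arg.constructorCode())
print(clone.GetName(), clone.getInt(0), clone.getDouble(0))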
diff --git a/bindings/pyroot/pythonizations/test/tfile_context_manager.py b/bindings/pyroot/pythonizations/test/tfile_context_manager.py
index cd71125e9edc6..7ab1f1e1b17e8 100644
--- a/bindings/pyroot/pythonizations/test/tfile_context_manager.py
+++ b/bindings/pyroot/pythonizations/test/tfile_context_manager.py
@@ -15,7 +15,7 @@ class TFileContextManager(unittest.TestCase):
XMIN = 10
XMAX = 242
- def check_file_data(self, tfile, filename):
+ def check_file_data(self, tfile, filename, histoname):
"""
Check status of the TFile after the context manager and correctness of
the data it contains.
@@ -24,7 +24,7 @@ def check_file_data(self, tfile, filename):
self.assertFalse(tfile.IsOpen()) # And it is correctly closed
with TFile(filename, "read") as infile:
- hin = infile.Get("myhisto")
+ hin = infile.Get(histoname)
xaxis = hin.GetXaxis()
self.assertEqual(self.NBINS, hin.GetNbinsX())
self.assertEqual(self.XMIN, xaxis.GetXmin())
@@ -37,33 +37,36 @@ def test_writeobject(self):
Write a histogram in a file within a context manager, using TDirectory::WriteObject.
"""
filename = "TFileContextManager_test_writeobject.root"
+ histoname = "myhisto"
with TFile(filename, "recreate") as outfile:
- hout = ROOT.TH1F("myhisto", "myhisto", self.NBINS, self.XMIN, self.XMAX)
+ hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX)
outfile.WriteObject(hout, "myhisto")
- self.check_file_data(outfile, filename)
+ self.check_file_data(outfile, filename, histoname)
def test_histowrite(self):
"""
Write a histogram in a file within a context manager, using TH1::Write.
"""
filename = "TFileContextManager_test_histowrite.root"
+ histoname = "myhisto_2"
with TFile(filename, "recreate") as outfile:
- hout = ROOT.TH1F("myhisto", "mhisto", self.NBINS, self.XMIN, self.XMAX)
+ hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX)
hout.Write()
- self.check_file_data(outfile, filename)
+ self.check_file_data(outfile, filename, histoname)
def test_filewrite(self):
"""
Write a histogram in a file within a context manager, using TFile::Write.
"""
filename = "TFileContextManager_test_filewrite.root"
+ histoname = "myhisto_3"
with TFile(filename, "recreate") as outfile:
- hout = ROOT.TH1F("myhisto", "myhisto", self.NBINS, self.XMIN, self.XMAX)
+ hout = ROOT.TH1F(histoname, histoname, self.NBINS, self.XMIN, self.XMAX)
outfile.Write()
- self.check_file_data(outfile, filename)
+ self.check_file_data(outfile, filename, histoname)
def test_detachhisto(self):
"""
diff --git a/bindings/tpython/src/TPyClassGenerator.cxx b/bindings/tpython/src/TPyClassGenerator.cxx
index 05104aae69344..14629201febc5 100644
--- a/bindings/tpython/src/TPyClassGenerator.cxx
+++ b/bindings/tpython/src/TPyClassGenerator.cxx
@@ -87,7 +87,12 @@ TClass *TPyClassGenerator::GetClass(const char *name, Bool_t load, Bool_t silent
std::string func_name = PyUnicode_AsUTF8(key);
// figure out number of variables required
+#if PY_VERSION_HEX < 0x30d00f0
PyObject *func_code = PyObject_GetAttrString(attr, (char *)"func_code");
+#else
+ PyObject *func_code = nullptr;
+ PyObject_GetOptionalAttrString(attr, (char *)"func_code", &func_code);
+#endif
PyObject *var_names = func_code ? PyObject_GetAttrString(func_code, (char *)"co_varnames") : NULL;
int nVars = var_names ? PyTuple_GET_SIZE(var_names) : 0 /* TODO: probably large number, all default? */;
if (nVars < 0)
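A pure-Python analogy of this change (illustrative only): on Python >= 3.13 the code switches to PyObject_GetOptionalAttrString, which reports an absent attribute without raising, much like getattr with a default:

def some_callable(a, b):
    return a + b

# old path: a plain attribute lookup raises AttributeError when absent;
# new path: probe optionally, with no pending exception on absence
func_code = getattr(some_callable, "func_code", None)  # None on Python 3
var_names = getattr(func_code, "co_varnames", ()) if func_code else ()
print(var_names)  # ()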
diff --git a/builtins/davix/CMakeLists.txt b/builtins/davix/CMakeLists.txt
index 7ac84367df0e7..9f64ec64314cf 100644
--- a/builtins/davix/CMakeLists.txt
+++ b/builtins/davix/CMakeLists.txt
@@ -10,9 +10,9 @@ find_package(libuuid REQUIRED)
find_package(LibXml2 REQUIRED)
find_package(OpenSSL REQUIRED)
-set(DAVIX_VERSION "0.8.7")
+set(DAVIX_VERSION "0.8.7p1")
set(DAVIX_URL "http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources")
-set(DAVIX_URLHASH "SHA256=78c24e14edd7e4e560392d67147ec8658c2aa0d3640415bdf6bc513afcf695e6")
+set(DAVIX_URLHASH "SHA256=d4eee9f20aa032893ce488273cc0bfb62bcad8e2a1afa6b260130508eaf3ce54")
set(DAVIX_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/DAVIX-prefix)
set(DAVIX_LIBNAME ${CMAKE_STATIC_LIBRARY_PREFIX}davix${CMAKE_STATIC_LIBRARY_SUFFIX})
diff --git a/builtins/rendercore/RenderCore-1.5.tar.gz b/builtins/rendercore/RenderCore-1.5.tar.gz
deleted file mode 100644
index 929e79c238b7f..0000000000000
Binary files a/builtins/rendercore/RenderCore-1.5.tar.gz and /dev/null differ
diff --git a/builtins/rendercore/RenderCore-1.6.tar.gz b/builtins/rendercore/RenderCore-1.6.tar.gz
new file mode 100644
index 0000000000000..4f35eee6ff12d
Binary files /dev/null and b/builtins/rendercore/RenderCore-1.6.tar.gz differ
diff --git a/builtins/xrootd/CMakeLists.txt b/builtins/xrootd/CMakeLists.txt
index b8d4731587fd2..c103cf95b8e4d 100644
--- a/builtins/xrootd/CMakeLists.txt
+++ b/builtins/xrootd/CMakeLists.txt
@@ -6,7 +6,7 @@
include(ExternalProject)
-set(XROOTD_VERSION "5.7.1")
+set(XROOTD_VERSION "5.7.2")
set(XROOTD_PREFIX ${CMAKE_BINARY_DIR})
message(STATUS "Downloading and building XROOTD version ${XROOTD_VERSION}")
@@ -25,7 +25,7 @@ list(REMOVE_DUPLICATES XROOTD_UTILS_LIBRARIES)
ExternalProject_Add(
BUILTIN_XROOTD
URL http://lcgpackages.web.cern.ch/lcgpackages/tarFiles/sources/xrootd-${XROOTD_VERSION}.tar.gz
- URL_HASH SHA256=c28c9dc0a2f5d0134e803981be8b1e8b1c9a6ec13b49f5fa3040889b439f4041
+ URL_HASH SHA256=c14c537edc66824ad3ca3c610240f9386c68993cbbcd28473ad3b42c8d14ba67
INSTALL_DIR ${XROOTD_PREFIX}
 CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
-DCMAKE_PREFIX_PATH:STRING=${OPENSSL_PREFIX}
diff --git a/cmake/modules/RootCPack.cmake b/cmake/modules/RootCPack.cmake
index f65ae71b332a2..400ccdde3cd41 100644
--- a/cmake/modules/RootCPack.cmake
+++ b/cmake/modules/RootCPack.cmake
@@ -84,7 +84,7 @@ if(MSVC)
else()
message(FATAL_ERROR "MSVC_VERSION ${MSVC_VERSION} not implemented")
endif()
- set(COMPILER_NAME_VERSION ".vc${VS_VERSION}")
+ set(COMPILER_NAME_VERSION ".python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR}.vc${VS_VERSION}")
else()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(COMPILER_NAME_VERSION "-gcc${CXX_MAJOR}.${CXX_MINOR}")
diff --git a/cmake/modules/RootConfiguration.cmake b/cmake/modules/RootConfiguration.cmake
index bc458b91fdc7f..3faf6090c8a97 100644
--- a/cmake/modules/RootConfiguration.cmake
+++ b/cmake/modules/RootConfiguration.cmake
@@ -529,12 +529,12 @@ else()
set(hashardwareinterferencesize undef)
endif()
+set(root_canvas_class "TRootCanvas")
+
if(webgui)
- set(root_canvas_class "TWebCanvas")
set(root_treeviewer_class "RTreeViewer")
set(root_geompainter_type "web")
else()
- set(root_canvas_class "TRootCanvas")
set(root_treeviewer_class "TTreeViewer")
set(root_geompainter_type "root")
endif()
diff --git a/cmake/modules/SearchInstalledSoftware.cmake b/cmake/modules/SearchInstalledSoftware.cmake
index 3ba540b6ee68b..509ed53ffba97 100644
--- a/cmake/modules/SearchInstalledSoftware.cmake
+++ b/cmake/modules/SearchInstalledSoftware.cmake
@@ -668,11 +668,11 @@ if((opengl OR cocoa) AND NOT builtin_glew)
find_package(GLEW REQUIRED)
else()
find_package(GLEW)
- # Bug was reported on newer version of CMake on Mac OS X:
- # https://gitlab.kitware.com/cmake/cmake/-/issues/19662
- # https://github.com/microsoft/vcpkg/pull/7967
- if(GLEW_FOUND AND APPLE AND CMAKE_VERSION VERSION_GREATER 3.15)
- message(FATAL_ERROR "Please enable builtin Glew due bug in latest CMake (use cmake option -Dbuiltin_glew=ON).")
+ if(GLEW_FOUND AND APPLE AND CMAKE_VERSION VERSION_GREATER 3.15 AND CMAKE_VERSION VERSION_LESS 3.25)
+ # Bug in CMake on Mac OS X until 3.25:
+ # https://gitlab.kitware.com/cmake/cmake/-/issues/19662
+ # https://github.com/microsoft/vcpkg/pull/7967
+ message(FATAL_ERROR "Please enable builtin Glew due to a bug in CMake's FindGLEW before v3.25 (use cmake option -Dbuiltin_glew=ON).")
unset(GLEW_FOUND)
elseif(GLEW_FOUND AND NOT TARGET GLEW::GLEW)
add_library(GLEW::GLEW UNKNOWN IMPORTED)
@@ -1273,7 +1273,10 @@ if(builtin_tbb)
install(DIRECTORY ${CMAKE_BINARY_DIR}/bin/ DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT libraries FILES_MATCHING PATTERN "tbb*.dll")
install(DIRECTORY ${CMAKE_BINARY_DIR}/lib/ DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT libraries FILES_MATCHING PATTERN "tbb*.lib")
else()
- set(TBB_LIBRARIES ${CMAKE_BINARY_DIR}/lib/libtbb${CMAKE_SHARED_LIBRARY_SUFFIX})
+ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
+ set(tbbsuffix "_debug")
+ endif()
+ set(TBB_LIBRARIES ${CMAKE_BINARY_DIR}/lib/libtbb${tbbsuffix}${CMAKE_SHARED_LIBRARY_SUFFIX})
install(DIRECTORY ${CMAKE_BINARY_DIR}/lib/ DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT libraries FILES_MATCHING PATTERN "libtbb*")
endif()
if(tbb_build)
@@ -1442,7 +1445,7 @@ if(builtin_veccore)
endif()
if(builtin_veccore)
- set(VecCore_VERSION "0.7.0")
+ set(VecCore_VERSION "0.8.2")
set(VecCore_PROJECT "VecCore-${VecCore_VERSION}")
set(VecCore_SRC_URI "${lcgpackages}/${VecCore_PROJECT}.tar.gz")
set(VecCore_DESTDIR "${CMAKE_BINARY_DIR}/externals")
@@ -1450,7 +1453,7 @@ if(builtin_veccore)
ExternalProject_Add(VECCORE
URL ${VecCore_SRC_URI}
- URL_HASH SHA256=61d9fc4be815c5c98088c2796763d3ed82ba4bad5a69b7892c1c2e7e1e53d311
+ URL_HASH SHA256=1268bca92acf00acd9775f1e79a2da7b1d902733d17e283e0dd5e02c41ac9666
BUILD_IN_SOURCE 0
LOG_DOWNLOAD 1 LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1
CMAKE_ARGS -G ${CMAKE_GENERATOR}
@@ -2010,8 +2013,8 @@ if(webgui)
endif()
ExternalProject_Add(
RENDERCORE
- URL ${CMAKE_SOURCE_DIR}/builtins/rendercore/RenderCore-1.5.tar.gz
- URL_HASH SHA256=c3f58e952e85308ba62142cba2ae627e6bcfcaa6ec1071e1483d1938d3df4a8e
+ URL ${CMAKE_SOURCE_DIR}/builtins/rendercore/RenderCore-1.6.tar.gz
+ URL_HASH SHA256=2fac6bfaef7ae8162091dfda7b2c2cfe3c5cbf841426d948d39deba72d860734
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
diff --git a/cmake/modules/SetROOTVersion.cmake b/cmake/modules/SetROOTVersion.cmake
index fe14f287e5195..a428860d9f22c 100644
--- a/cmake/modules/SetROOTVersion.cmake
+++ b/cmake/modules/SetROOTVersion.cmake
@@ -22,9 +22,17 @@ function(SET_VERSION_FROM_FILE)
string(REGEX MATCH "#define ROOT_VERSION_MAJOR ([0-9]*)" _ ${versionstr})
set(ROOT_MAJOR_VERSION ${CMAKE_MATCH_1})
string(REGEX MATCH "#define ROOT_VERSION_MINOR ([0-9]*)" _ ${versionstr})
- set(ROOT_MINOR_VERSION ${CMAKE_MATCH_1})
+ if (CMAKE_MATCH_1 LESS 10)
+ set(ROOT_MINOR_VERSION "0${CMAKE_MATCH_1}")
+ else()
+ set(ROOT_MINOR_VERSION ${CMAKE_MATCH_1})
+ endif()
string(REGEX MATCH "#define ROOT_VERSION_PATCH ([0-9]*)" _ ${versionstr})
- set(ROOT_PATCH_VERSION ${CMAKE_MATCH_1})
+ if (CMAKE_MATCH_1 LESS 10)
+ set(ROOT_PATCH_VERSION "0${CMAKE_MATCH_1}")
+ else()
+ set(ROOT_PATCH_VERSION ${CMAKE_MATCH_1})
+ endif()
set(ROOT_MAJOR_VERSION "${ROOT_MAJOR_VERSION}" PARENT_SCOPE)
set(ROOT_MINOR_VERSION "${ROOT_MINOR_VERSION}" PARENT_SCOPE)
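The zero-padding introduced here keeps version components below 10 two digits wide (e.g. 6.34.04); a quick sketch of the same rule in Python:

def pad(component: int) -> str:
    # mirror the CMake branch above: prepend "0" when the value is below 10
    return f"0{component}" if component < 10 else str(component)

major, minor, patch = 6, 34, 4
print(f"{major}.{pad(minor)}.{pad(patch)}")  # 6.34.04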
diff --git a/config/rootrc.in b/config/rootrc.in
index 19551c541a608..f1bfd4c775200 100644
--- a/config/rootrc.in
+++ b/config/rootrc.in
@@ -255,6 +255,8 @@ WebGui.HttpBind:
WebGui.HttpLoopback: yes
# Require unique single-time token (key) for connecting with widget (default - yes)
WebGui.OnetimeKey: yes
+# Allow only a single connection to any web widget (default - yes)
+WebGui.SingleConnMode: yes
# Use https protocol for the http server (default - no)
WebGui.UseHttps: no
WebGui.ServerCert: rootserver.pem
diff --git a/config/rootssh b/config/rootssh
index a8afb94a70609..0ef88feb179c3 100755
--- a/config/rootssh
+++ b/config/rootssh
@@ -42,25 +42,39 @@ elif [[ "$1" == "--as-listener--" ]] ; then
used_browser=$4
flag=1
+ NUM=1
- while [ $flag -ne 0 ] ; do
+ touch $listener_socket.log
- line="$(nc -l -U $listener_socket)"
+ # on macOS it is not possible to start netcat multiple times with the same socket,
+ # therefore run it permanently and redirect its output to a log file
+ nc -k -l -U $listener_socket >$listener_socket.log 2>/dev/null &
- if [[ "${line:0:5}" == "http:" ]] ; then
- remoteport=${line:5}
- # echo "Want to map remote port $localport:localhost:$remoteport"
- elif [[ "${line:0:7}" == "socket:" ]] ; then
- remotesocket=${line:7}
- # echo "Remote socket was created $remotesocket"
+ # remember the process id so it can be killed later
+ nc_procid=$!
+
+ # protect the socket and the log file from being read by others
+ chmod 0700 $listener_socket $listener_socket.log
+
+ # on exit, stop the netcat listener and remove the log file
+ trap "kill -SIGINT $nc_procid >/dev/null 2>&1; rm -f $listener_socket.log" 0 1 2 3 6
+
+ while [[ ($flag -ne 0) && (-f $listener_socket.log) ]] ; do
+
+ line=$(sed "${NUM}q;d" $listener_socket.log)
+
+ if [[ "${line}" == "" ]] ; then
+ sleep 0.2
elif [[ "${line:0:4}" == "win:" ]] ; then
+ NUM=$((NUM+1))
winurl=${line:4}
# echo "Start window http://localhost:$local_port/$winurl"
$used_browser "http://localhost:$local_port/$winurl"
elif [[ "$line" == "stop" ]] ; then
+ # echo "Get stop command $line"
flag=0
else
- echo "Command not recognized $line - stop"
+ echo "rootssh: got $line - not recoginzed, stop listener"
flag=0
fi
done
@@ -147,7 +161,10 @@ else
listener_processid=$!
- # start ssh
+ # on exit, kill the listener and remove temporary files
+ trap "kill -SIGINT $listener_processid > /dev/null 2>&1; rm -f $listener_local $listener_local.log $listener_remote $root_socket" 0 1 2 3 6
+
+ # starting ssh
if [[ "x$ssh_command" == "x" ]] ; then
ssh_command="\$SHELL"
@@ -156,15 +173,4 @@ else
ssh -t -R $listener_remote:$listener_local -L $localport:$root_socket $ssh_destination $ssh_args \
"chmod 0700 $listener_remote; export ROOT_WEBDISPLAY=server; export ROOT_LISTENER_SOCKET=$listener_remote; export ROOT_WEBGUI_SOCKET=$root_socket; $ssh_command; rm -f $listener_remote $root_socket"
- # try to stop listener with "stop" message
-
- echo "stop" | nc -U $listener_local -q 1 >/dev/null 2>&1
-
- # Kill listener process
-
- kill -9 $listener_processid > /dev/null 2>&1
-
- # Remove temporary files
-
- rm -f $listener_local $listener_remote
fi
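The rewritten listener replaces one netcat invocation per message with a single long-lived netcat whose log is polled line by line; a rough Python sketch of that polling loop (the log path and browser command are hypothetical stand-ins for $listener_socket.log and $used_browser):

import subprocess, time

log = "/tmp/listener.sock.log"  # hypothetical path
open(log, "a").close()          # same role as the touch above
num = 0
running = True
while running:
    lines = open(log).read().splitlines()
    if num >= len(lines):
        time.sleep(0.2)         # nothing new yet, mirrors the sleep above
        continue
    line = lines[num]
    num += 1
    if line.startswith("win:"):
        # open the reported window URL in a browser
        subprocess.Popen(["xdg-open", "http://localhost:8080/" + line[4:]])
    elif line == "stop":
        running = False
    else:
        print("got", line, "- not recognized, stop listener")
        running = False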
diff --git a/core/base/inc/TSystem.h b/core/base/inc/TSystem.h
index 72881a775b2b3..ccd1aee4c651b 100644
--- a/core/base/inc/TSystem.h
+++ b/core/base/inc/TSystem.h
@@ -38,6 +38,16 @@ class TSeqCollection;
class TFdSet;
class TVirtualMutex;
+/*! \enum ESocketBindOption
+ \brief Options for binding the created sockets
+
+ These values can be used to configure the binding of the opened sockets.
+*/
+enum ESocketBindOption {
+ kInaddrAny = 0, ///< Any address for socket binding
+ kInaddrLoopback = 1, ///< Refers to the local host via the loopback device
+};
+
enum EAccessMode {
kFileExists = 0,
kExecutePermission = 1,
@@ -501,8 +511,9 @@ class TSystem : public TNamed {
virtual int GetServiceByName(const char *service);
virtual char *GetServiceByPort(int port);
virtual int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp");
- virtual int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1);
- virtual int AnnounceUdpService(int port, int backlog);
+ virtual int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1,
+ ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny);
+ virtual int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny);
virtual int AnnounceUnixService(int port, int backlog);
virtual int AnnounceUnixService(const char *sockpath, int backlog);
virtual int AcceptConnection(int sock);
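Assuming the new unscoped enum is exported to PyROOT like other global ROOT enums (an assumption, not verified here), the extra parameter can be exercised from Python; the default kInaddrAny preserves the old bind-to-all-interfaces behaviour:

import ROOT

# hypothetical usage: announce a TCP service bound to the loopback device only
# (port 9555 is arbitrary; reuse=True, backlog=10, default tcp window size)
fd = ROOT.gSystem.AnnounceTcpService(9555, True, 10, -1, ROOT.kInaddrLoopback)
print("listening socket fd:", fd)  # -1/-2/-3 signal socket/bind/listen failures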
diff --git a/core/base/src/TColor.cxx b/core/base/src/TColor.cxx
index 752dbaee5614d..f53f0bfedac1a 100644
--- a/core/base/src/TColor.cxx
+++ b/core/base/src/TColor.cxx
@@ -1246,7 +1246,7 @@ void TColor::InitializeColors()
new TColor(kP6Red, 228./255., 37./255., 54./255., "kP6Red");
new TColor(kP6Grape, 150./255., 74./255., 139./255., "kP6Grape");
new TColor(kP6Gray, 156./255., 156./255., 161./255., "kP6Gray");
- new TColor(kP6Violet, 112./255., 33./255., 221./255., "kP6Violet");
+ new TColor(kP6Violet, 122./255., 33./255., 221./255., "kP6Violet");
new TColor(kP8Blue, 24./255., 69./255., 251./255., "kP8Blue");
new TColor(kP8Orange, 1., 94./255., 2./255., "kP8Orange");
diff --git a/core/base/src/TROOT.cxx b/core/base/src/TROOT.cxx
index de68e9de9ba54..d06ea08a0d9bf 100644
--- a/core/base/src/TROOT.cxx
+++ b/core/base/src/TROOT.cxx
@@ -2833,7 +2833,6 @@ void TROOT::SetWebDisplay(const char *webdisplay)
const char *wd = webdisplay ? webdisplay : "";
// store default values to set them back when needed
- static TString canName = gEnv->GetValue("Canvas.Name", "");
static TString brName = gEnv->GetValue("Browser.Name", "");
static TString trName = gEnv->GetValue("TreeViewer.Name", "");
static TString geomName = gEnv->GetValue("GeomPainter.Name", "");
@@ -2867,9 +2866,9 @@ void TROOT::SetWebDisplay(const char *webdisplay)
}
if (fIsWebDisplay) {
- // restore canvas and browser classes configured at the moment when gROOT->SetWebDisplay() was called for the first time
+ // restore browser classes configured at the moment when gROOT->SetWebDisplay() was called for the first time
// This is necessary when SetWebDisplay() called several times and therefore current settings may differ
- gEnv->SetValue("Canvas.Name", canName);
+ gEnv->SetValue("Canvas.Name", "TWebCanvas");
gEnv->SetValue("Browser.Name", brName);
gEnv->SetValue("TreeViewer.Name", trName);
gEnv->SetValue("GeomPainter.Name", geomName);
diff --git a/core/base/src/TSystem.cxx b/core/base/src/TSystem.cxx
index 7685bd249568f..a3c8bf639b54e 100644
--- a/core/base/src/TSystem.cxx
+++ b/core/base/src/TSystem.cxx
@@ -2342,7 +2342,7 @@ int TSystem::OpenConnection(const char *, int, int, const char *)
////////////////////////////////////////////////////////////////////////////////
/// Announce TCP/IP service.
-int TSystem::AnnounceTcpService(int, Bool_t, int, int)
+int TSystem::AnnounceTcpService(int, Bool_t, int, int, ESocketBindOption)
{
AbstractMethod("AnnounceTcpService");
return -1;
@@ -2351,7 +2351,7 @@ int TSystem::AnnounceTcpService(int, Bool_t, int, int)
////////////////////////////////////////////////////////////////////////////////
/// Announce UDP service.
-int TSystem::AnnounceUdpService(int, int)
+int TSystem::AnnounceUdpService(int, int, ESocketBindOption)
{
AbstractMethod("AnnounceUdpService");
return -1;
diff --git a/core/clingutils/src/TClingUtils.cxx b/core/clingutils/src/TClingUtils.cxx
index 58d58dadd47a4..3746617a150be 100644
--- a/core/clingutils/src/TClingUtils.cxx
+++ b/core/clingutils/src/TClingUtils.cxx
@@ -22,6 +22,7 @@
#include
#include
#include
+#include <cctype>
#include "RConfigure.h"
#include
@@ -3313,10 +3314,9 @@ void ROOT::TMetaUtils::GetCppName(std::string &out, const char *in)
out.push_back(c);
}
- // Remove initial numbers if any
- auto firstNonNumber = out.find_first_not_of("0123456789");
- if (firstNonNumber != std::string::npos)
- out.replace(0,firstNonNumber,"");
+ // If out is empty, or if it starts with a digit, it is not a valid C++ identifier. Prepend a "_"
+ if (out.empty() || isdigit(out[0]))
+ out.insert(out.begin(), '_');
}
static clang::SourceLocation
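The behavioural difference of this change, sketched in Python: the old code stripped leading digits (and left an all-digit or empty result untouched), while the new code keeps the characters and prepends an underscore so the result is always a valid identifier:

def old_tail(out: str) -> str:
    # previous behaviour: drop leading digits when a non-digit follows
    for i, c in enumerate(out):
        if not c.isdigit():
            return out[i:]
    return out  # all digits or empty: unchanged, hence not a valid identifier

def new_tail(out: str) -> str:
    # new behaviour: prepend '_' when empty or starting with a digit
    return "_" + out if not out or out[0].isdigit() else out

print(old_tail("2fast"), new_tail("2fast"))    # fast _2fast
print(repr(old_tail("")), repr(new_tail("")))  # '' '_'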
diff --git a/core/cont/inc/TList.h b/core/cont/inc/TList.h
index e9b432605c836..a86af707c175c 100644
--- a/core/cont/inc/TList.h
+++ b/core/cont/inc/TList.h
@@ -71,8 +71,6 @@ friend class TListIter;
TList() : fAscending(kTRUE) { }
- TList(TObject *) R__DEPRECATED(6, 34, "The argument is ignored. Use the default constructor TList::TList().") : fAscending(kTRUE) { } // for backward compatibility, don't use
-
virtual ~TList();
void Clear(Option_t *option="") override;
void Delete(Option_t *option="") override;
diff --git a/core/foundation/inc/ROOT/RConfig.hxx b/core/foundation/inc/ROOT/RConfig.hxx
index edd7305dedb5e..2b3af250e7968 100644
--- a/core/foundation/inc/ROOT/RConfig.hxx
+++ b/core/foundation/inc/ROOT/RConfig.hxx
@@ -491,6 +491,14 @@
# define _R__DEPRECATED_636(REASON) _R_DEPRECATED_REMOVE_NOW(REASON)
#endif
+/* USE AS `R__DEPRECATED(6,38, "Not threadsafe; use TFoo::Bar().")`
+ To be removed by 6.38 */
+#if ROOT_VERSION_CODE <= ROOT_VERSION(6,37,0)
+# define _R__DEPRECATED_638(REASON) _R__DEPRECATED_LATER(REASON)
+#else
+# define _R__DEPRECATED_638(REASON) _R_DEPRECATED_REMOVE_NOW(REASON)
+#endif
+
/* USE AS `R__DEPRECATED(7,00, "Not threadsafe; use TFoo::Bar().")`
To be removed by 7.00 */
#if ROOT_VERSION_CODE < ROOT_VERSION(6,99,0)
diff --git a/core/foundation/inc/ROOT/RVersion.hxx b/core/foundation/inc/ROOT/RVersion.hxx
index 6179eedb84a28..d97c45ceace2e 100644
--- a/core/foundation/inc/ROOT/RVersion.hxx
+++ b/core/foundation/inc/ROOT/RVersion.hxx
@@ -3,9 +3,9 @@
/* Update on release: */
#define ROOT_VERSION_MAJOR 6
-#define ROOT_VERSION_MINOR 33
-#define ROOT_VERSION_PATCH 01
-#define ROOT_RELEASE_DATE "Oct 10 2023"
+#define ROOT_VERSION_MINOR 34
+#define ROOT_VERSION_PATCH 4
+#define ROOT_RELEASE_DATE "Feb 10 2025"
/* Don't change the lines below. */
@@ -23,11 +23,26 @@
#define ROOT_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#define ROOT_VERSION_CODE ROOT_VERSION(ROOT_VERSION_MAJOR, ROOT_VERSION_MINOR, ROOT_VERSION_PATCH)
-#define R__VERS_QUOTE1(P) #P
-#define R__VERS_QUOTE(P) R__VERS_QUOTE1(P)
+#define R__VERS_QUOTE1_MAJOR(P) #P
+#define R__VERS_QUOTE_MAJOR(P) R__VERS_QUOTE1_MAJOR(P)
-#define ROOT_RELEASE R__VERS_QUOTE(ROOT_VERSION_MAJOR) \
- "." R__VERS_QUOTE(ROOT_VERSION_MINOR) \
- "." R__VERS_QUOTE(ROOT_VERSION_PATCH)
+
+#if ROOT_VERSION_MINOR < 10
+#define R__VERS_QUOTE1_MINOR(P) "0" #P
+#else
+#define R__VERS_QUOTE1_MINOR(P) #P
+#endif
+#define R__VERS_QUOTE_MINOR(P) R__VERS_QUOTE1_MINOR(P)
+
+#if ROOT_VERSION_PATCH < 10
+#define R__VERS_QUOTE1_PATCH(P) "0" #P
+#else
+#define R__VERS_QUOTE1_PATCH(P) #P
+#endif
+#define R__VERS_QUOTE_PATCH(P) R__VERS_QUOTE1_PATCH(P)
+
+#define ROOT_RELEASE R__VERS_QUOTE_MAJOR(ROOT_VERSION_MAJOR) \
+ "." R__VERS_QUOTE_MINOR(ROOT_VERSION_MINOR) \
+ "." R__VERS_QUOTE_PATCH(ROOT_VERSION_PATCH)
#endif // ROOT_RVERSION_H
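What the new quoting macros produce, sketched in Python: the packed integer code is unchanged, while the release string now zero-pads minor and patch components below 10:

MAJOR, MINOR, PATCH = 6, 34, 4

# ROOT_VERSION(a,b,c): one byte per component
version_code = (MAJOR << 16) + (MINOR << 8) + PATCH
print(hex(version_code))  # 0x62204

# ROOT_RELEASE: "6.34.04" rather than "6.34.4"
print(f"{MAJOR}.{MINOR:02d}.{PATCH:02d}")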
diff --git a/core/meta/inc/TClass.h b/core/meta/inc/TClass.h
index ff250aae7ef5a..e8c1a16339d47 100644
--- a/core/meta/inc/TClass.h
+++ b/core/meta/inc/TClass.h
@@ -307,7 +307,7 @@ friend class TStreamerInfo;
void SetClassSize(Int_t sizof) { fSizeof = sizof; }
TVirtualStreamerInfo* DetermineCurrentStreamerInfo();
- void SetStreamerImpl();
+ void SetStreamerImpl(Int_t streamerType);
void SetRuntimeProperties();
diff --git a/core/meta/src/TClass.cxx b/core/meta/src/TClass.cxx
index d7658a38027e1..817e6ce4d7a47 100644
--- a/core/meta/src/TClass.cxx
+++ b/core/meta/src/TClass.cxx
@@ -751,7 +751,9 @@ void TDumpMembers::Inspect(TClass *cl, const char *pname, const char *mname, con
line[kvalue] = 0;
}
} else {
- strncpy(&line[kvalue], membertype->AsString(p3pointer), TMath::Min(kline-1-kvalue,(int)strlen(membertype->AsString(p3pointer))));
+ line[kvalue] = '-';
+ line[kvalue+1] = '>';
+ strncpy(&line[kvalue+2], membertype->AsString(p3pointer), TMath::Min(kline-1-kvalue-2,(int)strlen(membertype->AsString(p3pointer))));
}
} else if (!strcmp(memberFullTypeName, "char*") ||
!strcmp(memberFullTypeName, "const char*")) {
@@ -6166,14 +6168,21 @@ Long_t TClass::Property() const
// Avoid asking about the class when it is still building
if (TestBit(kLoading)) return fProperty;
+ if (fStreamerType != kDefault && !HasInterpreterInfo()) {
+ // We have no interpreter information but we already set the streamer type
+ // so we have already been here and have no new information, then let's
+ // give up. See the code at this end of this routine (else branch of the
+ // `if (HasInterpreterInfo()` for the path we took before.
+ return 0;
+ }
+
// When called via TMapFile (e.g. Update()) make sure that the dictionary
// gets allocated on the heap and not in the mapped file.
TMmallocDescTemp setreset;
 TClass *kl = const_cast<TClass *>(this);
- kl->fStreamerType = TClass::kDefault;
- kl->fStreamerImpl = &TClass::StreamerDefault;
+ Int_t streamerType = TClass::kDefault;
if (InheritsFrom(TObject::Class())) {
kl->SetBit(kIsTObject);
@@ -6182,8 +6191,7 @@ Long_t TClass::Property() const
Int_t delta = kl->GetBaseClassOffsetRecurse(TObject::Class());
if (delta==0) kl->SetBit(kStartWithTObject);
- kl->fStreamerType = kTObject;
- kl->fStreamerImpl = &TClass::StreamerTObject;
+ streamerType = kTObject;
}
if (HasInterpreterInfo()) {
@@ -6195,33 +6203,30 @@ Long_t TClass::Property() const
 if (!const_cast<TClass *>(this)->GetClassMethodWithPrototype("Streamer","TBuffer&",kFALSE)) {
kl->SetBit(kIsForeign);
- kl->fStreamerType = kForeign;
- kl->fStreamerImpl = &TClass::StreamerStreamerInfo;
+ streamerType = kForeign;
- } else if ( kl->fStreamerType == TClass::kDefault ) {
+ } else if (streamerType == TClass::kDefault) {
if (kl->fConvStreamerFunc) {
- kl->fStreamerType = kInstrumented;
- kl->fStreamerImpl = &TClass::ConvStreamerInstrumented;
+ streamerType = kInstrumented;
} else if (kl->fStreamerFunc) {
- kl->fStreamerType = kInstrumented;
- kl->fStreamerImpl = &TClass::StreamerInstrumented;
+ streamerType = kInstrumented;
} else {
// We have an automatic streamer using the StreamerInfo .. no need to go through the
// Streamer method function itself.
- kl->fStreamerType = kInstrumented;
- kl->fStreamerImpl = &TClass::StreamerStreamerInfo;
+ streamerType = kInstrumented;
}
}
if (fStreamer) {
- kl->fStreamerType = kExternal;
- kl->fStreamerImpl = &TClass::StreamerExternal;
+ streamerType = kExternal;
}
 if (const_cast<TClass *>(this)->GetClassMethodWithPrototype("Hash", "", kTRUE)) {
kl->SetBit(kHasLocalHashMember);
}
+ kl->SetStreamerImpl(streamerType);
+
if (GetClassInfo()) {
// In the case where the TClass for one of ROOT's core class
// (eg TClonesArray for map) is requested
@@ -6236,15 +6241,16 @@ Long_t TClass::Property() const
// and think all test bits have been properly set.
kl->fProperty = gCling->ClassInfo_Property(fClassInfo);
}
+
} else {
if (fStreamer) {
- kl->fStreamerType = kExternal;
- kl->fStreamerImpl = &TClass::StreamerExternal;
+ streamerType = kExternal;
}
- kl->fStreamerType |= kEmulatedStreamer;
- kl->SetStreamerImpl();
+ streamerType |= kEmulatedStreamer;
+
+ kl->SetStreamerImpl(streamerType);
// fProperty was *not* set so that it can be forced to be recalculated
// next time.
return 0;
@@ -6279,8 +6285,9 @@ void TClass::SetRuntimeProperties()
/// Internal routine to set fStreamerImpl based on the value of
/// fStreamerType.
-void TClass::SetStreamerImpl()
+void TClass::SetStreamerImpl(Int_t streamerType)
{
+ fStreamerType = streamerType;
switch (fStreamerType) {
case kTObject: fStreamerImpl = &TClass::StreamerTObject; break;
case kForeign: fStreamerImpl = &TClass::StreamerStreamerInfo; break;
diff --git a/core/meta/src/TProtoClass.cxx b/core/meta/src/TProtoClass.cxx
index 26674c1cedd79..1c53d2c73f256 100644
--- a/core/meta/src/TProtoClass.cxx
+++ b/core/meta/src/TProtoClass.cxx
@@ -304,7 +304,6 @@ Bool_t TProtoClass::FillTClass(TClass* cl) {
cl->fCanSplit = fCanSplit;
cl->fProperty = fProperty;
cl->fClassProperty = fClassProperty;
- cl->fStreamerType = fStreamerType;
// Update pointers to TClass
if (cl->fBase.load()) {
@@ -405,7 +404,7 @@ Bool_t TProtoClass::FillTClass(TClass* cl) {
cl->fRealData = new TList(); // FIXME: this should really become a THashList!
}
- cl->SetStreamerImpl();
+ cl->SetStreamerImpl(fStreamerType);
// set to zero in order not to delete when protoclass is deleted
fBase = nullptr;
diff --git a/core/testsupport/src/TestSupport.cxx b/core/testsupport/src/TestSupport.cxx
index 402f57e83a820..9889a432f408f 100644
--- a/core/testsupport/src/TestSupport.cxx
+++ b/core/testsupport/src/TestSupport.cxx
@@ -57,16 +57,6 @@ static struct ForbidDiagnostics {
}
// FIXME: RNTuple warns that it's in beta stage.
- if (level == kWarning
- && strstr(msg, "The RNTuple file format will change. Do not store real data with this version of RNTuple!") != nullptr) {
- std::cerr << "Warning in " << location << " " << msg << std::endl;
- return;
- }
- if (level == kWarning
- && strstr(msg, "Pre-release format version: RC ") != nullptr) {
- std::cerr << "Warning in " << location << " " << msg << std::endl;
- return;
- }
if (level == kWarning && strstr(msg, "Merging RNTuples is experimental") != nullptr) {
std::cerr << "Warning in " << location << " " << msg << std::endl;
return;
diff --git a/core/unix/inc/TUnixSystem.h b/core/unix/inc/TUnixSystem.h
index 35b24f3482fa7..9f89288d96de1 100644
--- a/core/unix/inc/TUnixSystem.h
+++ b/core/unix/inc/TUnixSystem.h
@@ -62,8 +62,8 @@ class TUnixSystem : public TSystem {
static int UnixUnixConnect(int port);
static int UnixUnixConnect(const char *path);
static int UnixTcpService(int port, Bool_t reuse, int backlog,
- int tcpwindowsize);
- static int UnixUdpService(int port, int backlog);
+ int tcpwindowsize, ESocketBindOption socketBindOption);
+ static int UnixUdpService(int port, int backlog, ESocketBindOption socketBindOption);
static int UnixUnixService(int port, int backlog);
static int UnixUnixService(const char *sockpath, int backlog);
static int UnixRecv(int sock, void *buf, int len, int flag);
@@ -197,8 +197,8 @@ class TUnixSystem : public TSystem {
char *GetServiceByPort(int port) override;
int ConnectService(const char *server, int port, int tcpwindowsize, const char *protocol = "tcp");
int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp") override;
- int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1) override;
- int AnnounceUdpService(int port, int backlog) override;
+ int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override;
+ int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override;
int AnnounceUnixService(int port, int backlog) override;
int AnnounceUnixService(const char *sockpath, int backlog) override;
int AcceptConnection(int sock) override;
diff --git a/core/unix/src/TUnixSystem.cxx b/core/unix/src/TUnixSystem.cxx
index 83be91c61df09..4f6f8c5e45602 100644
--- a/core/unix/src/TUnixSystem.cxx
+++ b/core/unix/src/TUnixSystem.cxx
@@ -3239,17 +3239,17 @@ int TUnixSystem::OpenConnection(const char *server, int port, int tcpwindowsize,
/// or -3 if listen() failed.
int TUnixSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog,
- int tcpwindowsize)
+ int tcpwindowsize, ESocketBindOption socketBindOption)
{
- return UnixTcpService(port, reuse, backlog, tcpwindowsize);
+ return UnixTcpService(port, reuse, backlog, tcpwindowsize, socketBindOption);
}
////////////////////////////////////////////////////////////////////////////////
/// Announce UDP service.
-int TUnixSystem::AnnounceUdpService(int port, int backlog)
+int TUnixSystem::AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption)
{
- return UnixUdpService(port, backlog);
+ return UnixUdpService(port, backlog, socketBindOption);
}
////////////////////////////////////////////////////////////////////////////////
@@ -4291,11 +4291,13 @@ int TUnixSystem::UnixUnixConnect(const char *sockpath)
/// Use tcpwindowsize to specify the size of the receive buffer, it has
/// to be specified here to make sure the window scale option is set (for
/// tcpwindowsize > 65KB and for platforms supporting window scaling).
+/// The socketBindOption parameter allows specifying how the socket will be
+/// bound. See the documentation of ESocketBindOption for details.
/// Returns socket fd or -1 if socket() failed, -2 if bind() failed
/// or -3 if listen() failed.
int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog,
- int tcpwindowsize)
+ int tcpwindowsize, ESocketBindOption socketBindOption)
{
const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000;
short sport, tryport = kSOCKET_MINPORT;
@@ -4329,7 +4331,7 @@ int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog,
struct sockaddr_in inserver;
memset(&inserver, 0, sizeof(inserver));
inserver.sin_family = AF_INET;
- inserver.sin_addr.s_addr = htonl(INADDR_ANY);
+ inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK);
inserver.sin_port = sport;
// Bind socket
@@ -4369,8 +4371,10 @@ int TUnixSystem::UnixTcpService(int port, Bool_t reuse, int backlog,
/// how many sockets can be waiting to be accepted. If port is 0 a port
/// scan will be done to find a free port. This option is mutual exlusive
/// with the reuse option.
+/// The socketBindOption parameter allows specifying how the socket will be
+/// bound. See the documentation of ESocketBindOption for details.
-int TUnixSystem::UnixUdpService(int port, int backlog)
+int TUnixSystem::UnixUdpService(int port, int backlog, ESocketBindOption socketBindOption)
{
const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000;
short sport, tryport = kSOCKET_MINPORT;
@@ -4391,7 +4395,7 @@ int TUnixSystem::UnixUdpService(int port, int backlog)
struct sockaddr_in inserver;
memset(&inserver, 0, sizeof(inserver));
inserver.sin_family = AF_INET;
- inserver.sin_addr.s_addr = htonl(INADDR_ANY);
+ inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK);
inserver.sin_port = sport;
// Bind socket
diff --git a/core/winnt/inc/TWinNTSystem.h b/core/winnt/inc/TWinNTSystem.h
index 675bb3f5c743c..59c5031336244 100644
--- a/core/winnt/inc/TWinNTSystem.h
+++ b/core/winnt/inc/TWinNTSystem.h
@@ -232,8 +232,8 @@ class TWinNTSystem : public TSystem {
int GetServiceByName(const char *service) override;
char *GetServiceByPort(int port) override;
int OpenConnection(const char *server, int port, int tcpwindowsize = -1, const char *protocol = "tcp") override;
- int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1) override;
- int AnnounceUdpService(int port, int backlog) override;
+ int AnnounceTcpService(int port, Bool_t reuse, int backlog, int tcpwindowsize = -1, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override;
+ int AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption = ESocketBindOption::kInaddrAny) override;
int AnnounceUnixService(int port, int backlog) override;
int AnnounceUnixService(const char *sockpath, int backlog) override;
int AcceptConnection(int sock) override;
diff --git a/core/winnt/src/TWinNTSystem.cxx b/core/winnt/src/TWinNTSystem.cxx
index 91071a4c95cf9..b47c594d26831 100644
--- a/core/winnt/src/TWinNTSystem.cxx
+++ b/core/winnt/src/TWinNTSystem.cxx
@@ -5377,11 +5377,13 @@ int TWinNTSystem::OpenConnection(const char *server, int port, int tcpwindowsize
/// Use tcpwindowsize to specify the size of the receive buffer, it has
/// to be specified here to make sure the window scale option is set (for
/// tcpwindowsize > 65KB and for platforms supporting window scaling).
+/// The socketBindOption parameter allows specifying how the socket will be
+/// bound. See the documentation of ESocketBindOption for details.
/// Returns socket fd or -1 if socket() failed, -2 if bind() failed
/// or -3 if listen() failed.
int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog,
- int tcpwindowsize)
+ int tcpwindowsize, ESocketBindOption socketBindOption)
{
short sport;
struct servent *sp;
@@ -5424,7 +5426,7 @@ int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog,
struct sockaddr_in inserver;
memset(&inserver, 0, sizeof(inserver));
inserver.sin_family = AF_INET;
- inserver.sin_addr.s_addr = ::htonl(INADDR_ANY);
+ inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? ::htonl(INADDR_ANY) : ::htonl(INADDR_LOOPBACK);
inserver.sin_port = sport;
// Bind socket
@@ -5458,13 +5460,15 @@ int TWinNTSystem::AnnounceTcpService(int port, Bool_t reuse, int backlog,
////////////////////////////////////////////////////////////////////////////////
/// Announce UDP service.
-int TWinNTSystem::AnnounceUdpService(int port, int backlog)
+int TWinNTSystem::AnnounceUdpService(int port, int backlog, ESocketBindOption socketBindOption)
{
// Open a socket, bind to it and start listening for UDP connections
// on the port. If reuse is true reuse the address, backlog specifies
// how many sockets can be waiting to be accepted. If port is 0 a port
// scan will be done to find a free port. This option is mutual exlusive
// with the reuse option.
+ // The socketBindOption parameter allows specifying how the socket will be
+ // bound. See the documentation of ESocketBindOption for details.
const short kSOCKET_MINPORT = 5000, kSOCKET_MAXPORT = 15000;
short sport, tryport = kSOCKET_MINPORT;
@@ -5485,7 +5489,7 @@ int TWinNTSystem::AnnounceUdpService(int port, int backlog)
struct sockaddr_in inserver;
memset(&inserver, 0, sizeof(inserver));
inserver.sin_family = AF_INET;
- inserver.sin_addr.s_addr = htonl(INADDR_ANY);
+ inserver.sin_addr.s_addr = socketBindOption == ESocketBindOption::kInaddrAny ? htonl(INADDR_ANY) : htonl(INADDR_LOOPBACK);
inserver.sin_port = sport;
// Bind socket
diff --git a/core/zip/inc/Compression.h b/core/zip/inc/Compression.h
index 463d10baad2d3..0eb9bee10671a 100644
--- a/core/zip/inc/Compression.h
+++ b/core/zip/inc/Compression.h
@@ -114,7 +114,7 @@ struct RCompressionSetting {
};
// clang-format off
-enum R__DEPRECATED(6, 34, "Use RCompressionSetting::EAlgorithm instead") ECompressionAlgorithm {
+enum R__DEPRECATED(6, 36, "Use RCompressionSetting::EAlgorithm instead") ECompressionAlgorithm {
 kUseGlobalCompressionSetting = static_cast<int>(RCompressionSetting::EAlgorithm::kUseGlobal),
 kUseGlobalSetting = static_cast<int>(RCompressionSetting::EAlgorithm::kUseGlobal),
 kZLIB = static_cast<int>(RCompressionSetting::EAlgorithm::kZLIB),
@@ -128,7 +128,7 @@ enum R__DEPRECATED(6, 34, "Use RCompressionSetting::EAlgorithm instead") ECompre
int CompressionSettings(RCompressionSetting::EAlgorithm::EValues algorithm, int compressionLevel);
int CompressionSettings(ROOT::ECompressionAlgorithm algorithm, int compressionLevel)
- R__DEPRECATED(6, 34, "Use the overload accepting RCompressionSetting::EAlgorithm instead");
+ R__DEPRECATED(6, 36, "Use the overload accepting RCompressionSetting::EAlgorithm instead");
// clang-format on
} // namespace ROOT
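For orientation, ROOT packs an algorithm and a level into a single compression setting as 100 * algorithm + level (algorithm values from RCompressionSetting::EAlgorithm, e.g. kZLIB = 1, kLZMA = 2, kLZ4 = 4, kZSTD = 5); a one-line Python sketch of the packing:

def compression_settings(algorithm: int, level: int) -> int:
    # same packing as ROOT::CompressionSettings
    return algorithm * 100 + level

print(compression_settings(1, 6))  # 106: ZLIB at level 6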
diff --git a/core/zip/inc/RZip.h b/core/zip/inc/RZip.h
index 043446f2d165c..a3433b56c5b8d 100644
--- a/core/zip/inc/RZip.h
+++ b/core/zip/inc/RZip.h
@@ -33,7 +33,7 @@ extern "C" ROOT::RCompressionSetting::EAlgorithm::EValues R__getCompressionAlgor
* R__zipMultipleAlgorithm instead.
*/
extern "C" void R__zip(int cxlevel, int *srcsize, char *src, int *tgtsize, char *tgt, int *irep)
- R__DEPRECATED(6, 34, "use R__zipMultipleAlgorithm instead");
+ R__DEPRECATED(6, 36, "use R__zipMultipleAlgorithm instead");
extern "C" void R__unzip(int *srcsize, unsigned char *src, int *tgtsize, unsigned char *tgt, int *irep);
diff --git a/documentation/doxygen/Doxyfile b/documentation/doxygen/Doxyfile
index 56b40b16c1d8e..9ed9156310359 100644
--- a/documentation/doxygen/Doxyfile
+++ b/documentation/doxygen/Doxyfile
@@ -1594,7 +1594,7 @@ SITEMAP_URL =
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
-GENERATE_QHP = YES
+GENERATE_QHP = NO
# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
# the file name of the resulting .qch file. The path specified is relative to
@@ -1649,7 +1649,7 @@ QHP_SECT_FILTER_ATTRS =
# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
-QHG_LOCATION = qhelpgenerator
+QHG_LOCATION = qhelpgenerator-qt5
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
diff --git a/documentation/doxygen/DoxygenLayout.xml b/documentation/doxygen/DoxygenLayout.xml
index dc37f2e2ef8a3..3b2b0eb707ff7 100644
--- a/documentation/doxygen/DoxygenLayout.xml
+++ b/documentation/doxygen/DoxygenLayout.xml
@@ -1,10 +1,10 @@
-
+
-
+
diff --git a/documentation/doxygen/Makefile b/documentation/doxygen/Makefile
index fdf6add7b0e47..69e7749f5ba50 100644
--- a/documentation/doxygen/Makefile
+++ b/documentation/doxygen/Makefile
@@ -65,7 +65,6 @@ doxygen: filter pyzdoc
doxygen
bash ./CleanNamespaces.sh
gzip $(DOXYGEN_IMAGE_PATH)/ROOT.tag
- gzip $(DOXYGEN_IMAGE_PATH)/ROOT.qch
rm -rf files c1* *.ps *.eps *.png *.jpg *.tex *.svg *.pdf *.root *.xpm *.out *.dat *.dtd *.dot *.txt *.csv *.log *.rs
rm -rf listofclass.sh tmva* data* result* config* test* Roo* My* Freq*
rm -f Doxyfile_INPUT filter htmlfooter.html MDF.C pca.C
diff --git a/documentation/users-guide/WebDisplay.md b/documentation/users-guide/WebDisplay.md
index c84a58f1b09d4..072a615661025 100644
--- a/documentation/users-guide/WebDisplay.md
+++ b/documentation/users-guide/WebDisplay.md
@@ -20,7 +20,8 @@ auto win = ROOT::RWebWindow::Create();
// set HTML page which is showed when window displayed
win->SetDefaultPage("file:page.html"); // set
-// allow unlimitted user connections to the window (default only 1)
+// allow unlimited user connections to the window (default only 1)
+ROOT::RWebWindowsManager::SetSingleConnMode(false);
win->SetConnLimit(0);
// configure predefined geometry
diff --git a/geom/geom/inc/TGeoBBox.h b/geom/geom/inc/TGeoBBox.h
index a6f90a414ee00..9a95d0d8af11b 100644
--- a/geom/geom/inc/TGeoBBox.h
+++ b/geom/geom/inc/TGeoBBox.h
@@ -38,7 +38,7 @@ class TGeoBBox : public TGeoShape {
// methods
static Bool_t
AreOverlapping(const TGeoBBox *box1, const TGeoMatrix *mat1, const TGeoBBox *box2, const TGeoMatrix *mat2)
- R__DEPRECATED(6, 34, "DEPRECATED, DO NOT USE ! The overlap detection does not work for all cases");
+ R__DEPRECATED(6, 36, "DEPRECATED, DO NOT USE ! The overlap detection does not work for all cases");
Double_t Capacity() const override;
void ComputeBBox() override;
diff --git a/geom/geom/inc/bvh/v2/bvh.h b/geom/geom/inc/bvh/v2/bvh.h
index bbf7c117530fd..1c4f04ecda9b3 100644
--- a/geom/geom/inc/bvh/v2/bvh.h
+++ b/geom/geom/inc/bvh/v2/bvh.h
@@ -146,10 +146,12 @@ void Bvh::traverse_top_down(Index start, Stack& stack, LeafFn&& leaf_fn, I
stack.push(far_index);
}
top = near_index;
- } else if (hit_right)
+ } else if (hit_right) {
top = right.index;
- else [[unlikely]]
+ }
+ else [[unlikely]] {
goto restart;
+ }
}
[[maybe_unused]] auto was_hit = leaf_fn(top.first_id(), top.first_id() + top.prim_count());
@@ -163,19 +165,18 @@ template
template
void Bvh::intersect(const Ray& ray, Index start, Stack& stack, LeafFn&& leaf_fn, InnerFn&& inner_fn) const {
auto inv_dir = ray.template get_inv_dir();
- auto inv_org = -inv_dir * ray.org;
- auto inv_dir_pad = ray.pad_inv_dir(inv_dir);
+ auto inv_dir_pad_or_inv_org = IsRobust ? ray.pad_inv_dir(inv_dir) : -inv_dir * ray.org;
auto octant = ray.get_octant();
traverse_top_down(start, stack, leaf_fn, [&] (const Node& left, const Node& right) {
inner_fn(left, right);
 std::pair<Scalar, Scalar> intr_left, intr_right;
if constexpr (IsRobust) {
- intr_left = left .intersect_robust(ray, inv_dir, inv_dir_pad, octant);
- intr_right = right.intersect_robust(ray, inv_dir, inv_dir_pad, octant);
+ intr_left = left .intersect_robust(ray, inv_dir, inv_dir_pad_or_inv_org, octant);
+ intr_right = right.intersect_robust(ray, inv_dir, inv_dir_pad_or_inv_org, octant);
} else {
- intr_left = left .intersect_fast(ray, inv_dir, inv_org, octant);
- intr_right = right.intersect_fast(ray, inv_dir, inv_org, octant);
+ intr_left = left .intersect_fast(ray, inv_dir, inv_dir_pad_or_inv_org, octant);
+ intr_right = right.intersect_fast(ray, inv_dir, inv_dir_pad_or_inv_org, octant);
}
return std::make_tuple(
intr_left.first <= intr_left.second,
diff --git a/geom/geom/src/TGeoManager.cxx b/geom/geom/src/TGeoManager.cxx
index c79e072100e2b..a38f22e05f320 100644
--- a/geom/geom/src/TGeoManager.cxx
+++ b/geom/geom/src/TGeoManager.cxx
@@ -2916,9 +2916,15 @@ Int_t TGeoManager::GetByteCount(Option_t * /*option*/)
TVirtualGeoPainter *TGeoManager::GetGeomPainter()
{
if (!fPainter) {
- const char *kind = gEnv->GetValue("GeomPainter.Name", "");
+ const char *kind = nullptr;
+ if (gPad)
+ kind = gPad->IsWeb() ? "web" : "root";
+ else
+ kind = gEnv->GetValue("GeomPainter.Name", "");
+
if (!kind || !*kind)
kind = (gROOT->IsWebDisplay() && !gROOT->IsWebDisplayBatch()) ? "web" : "root";
+
if (auto h = gROOT->GetPluginManager()->FindHandler("TVirtualGeoPainter", kind)) {
if (h->LoadPlugin() == -1) {
Error("GetGeomPainter", "could not load plugin for %s geo_painter", kind);
diff --git a/graf2d/gpad/src/TPad.cxx b/graf2d/gpad/src/TPad.cxx
index 7cd2e1b343e96..8fd260481c50c 100644
--- a/graf2d/gpad/src/TPad.cxx
+++ b/graf2d/gpad/src/TPad.cxx
@@ -3435,8 +3435,8 @@ void TPad::FillCollideGridTH1(TObject *o)
else y2l = fUymin;
}
y2 = (Int_t)((y2l-fY1)/ys);
- for (j=y1; j<=y2; j++) {
- NotFree(x1, j);
+ for (j=y1; j<y2; j++) {
+ NotFree(x1, j);
 }

 x1l = th->GetBinLowEdge(i);
diff --git a/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx b/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx
index be2a467280bc9..f5f255ec485e1 100644
--- a/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx
+++ b/graf2d/gpadv7/inc/ROOT/RVirtualCanvasPainter.hxx
@@ -9,6 +9,7 @@
#ifndef ROOT7_RVirtualCanvasPainter
#define ROOT7_RVirtualCanvasPainter
+#include
#include
#include
#include
diff --git a/graf3d/eve7/inc/ROOT/REveChunkManager.hxx b/graf3d/eve7/inc/ROOT/REveChunkManager.hxx
index 538884ee4adad..a03c481f66cd8 100644
--- a/graf3d/eve7/inc/ROOT/REveChunkManager.hxx
+++ b/graf3d/eve7/inc/ROOT/REveChunkManager.hxx
@@ -17,6 +17,7 @@
#include "TArrayC.h"
#include
+#include
namespace ROOT {
namespace Experimental {
diff --git a/graf3d/eve7/inc/ROOT/REveElement.hxx b/graf3d/eve7/inc/ROOT/REveElement.hxx
index 0f9966a138b59..b81866194237f 100644
--- a/graf3d/eve7/inc/ROOT/REveElement.hxx
+++ b/graf3d/eve7/inc/ROOT/REveElement.hxx
@@ -15,6 +15,7 @@
#include
#include
#include
+#include "TString.h"
#include
#include
@@ -339,6 +340,14 @@ protected:
UChar_t fChangeBits{0}; //!
Char_t fDestructing{kNone}; //!
+ static thread_local REveElement *stlMirAlpha;
+ static thread_local int stlMirError;
+ static thread_local std::string stlMirErrorString;
+ static void ClearMirContext();
+ static void SetMirContext(REveElement *el);
+ static void SetMirError(int error, std::string_view err_str="");
+ static void AppendMirErrorString(std::string_view err_str);
+
public:
void StampColorSelection() { AddStamp(kCBColorSelection); }
void StampTransBBox() { AddStamp(kCBTransBBox); }
diff --git a/graf3d/eve7/inc/ROOT/REveGeoShape.hxx b/graf3d/eve7/inc/ROOT/REveGeoShape.hxx
index 894787b1dea15..5f651b92488c3 100644
--- a/graf3d/eve7/inc/ROOT/REveGeoShape.hxx
+++ b/graf3d/eve7/inc/ROOT/REveGeoShape.hxx
@@ -14,6 +14,7 @@
#include
+class TGeoManager;
class TGeoShape;
class TGeoHMatrix;
class TGeoCompositeShape;
diff --git a/graf3d/eve7/inc/ROOT/REveManager.hxx b/graf3d/eve7/inc/ROOT/REveManager.hxx
index a9a21258ab5c5..771d8923201b9 100644
--- a/graf3d/eve7/inc/ROOT/REveManager.hxx
+++ b/graf3d/eve7/inc/ROOT/REveManager.hxx
@@ -158,6 +158,9 @@ protected:
REveServerStatus fServerStatus;
bool fIsRCore{false};
+ // restricted functionality for public use
+ bool fHttpPublic{false};
+
void WindowConnect(unsigned connid);
void WindowData(unsigned connid, const std::string &arg);
void WindowDisconnect(unsigned connid);
@@ -281,6 +284,9 @@ public:
void GetServerStatus(REveServerStatus&);
bool IsRCore() const { return fIsRCore; }
+
+ bool GetHttpPublic() { return fHttpPublic; }
+ void SetHttpPublic(bool);
};
R__EXTERN REveManager* gEve;
diff --git a/graf3d/eve7/inc/ROOT/REveTypes.hxx b/graf3d/eve7/inc/ROOT/REveTypes.hxx
index 5498da002ab1e..49ccf0e6ca004 100644
--- a/graf3d/eve7/inc/ROOT/REveTypes.hxx
+++ b/graf3d/eve7/inc/ROOT/REveTypes.hxx
@@ -13,13 +13,15 @@
#ifndef ROOT7_REveTypes
#define ROOT7_REveTypes
-#include "GuiTypes.h" // For Pixel_t only, to be changed.
+#include "RtypesCore.h"
-#include "TString.h"
+#include
+#include
+
+typedef ULong_t Pixel_t; // from GuiTypes.h
+
+class TString;
-#include
-#include
-class TGeoManager;
namespace ROOT {
namespace Experimental {
typedef unsigned int ElementId_t;
@@ -42,10 +44,12 @@ class REveException : public std::exception {
std::string fWhat;
public:
REveException() = default;
- explicit REveException(const std::string &s) : fWhat(s) {}
+ explicit REveException(std::string_view s) : fWhat(s) {}
~REveException() noexcept override {}
- void append(const std::string &s) { fWhat.append(s); }
+ void append(std::string_view s) { fWhat.append(s); }
+ operator const std::string&() const noexcept { return fWhat; }
+ const std::string &str() const noexcept { return fWhat; }
const char *what() const noexcept override { return fWhat.c_str(); }
};
@@ -54,6 +58,9 @@ REveException operator+(const REveException &s1, const TString &s2);
REveException operator+(const REveException &s1, const char *s2);
REveException operator+(const REveException &s1, ElementId_t x);
+inline std::ostream& operator <<(std::ostream &s, const REveException &e)
+{ s << e.what(); return s; }
+
/// Log channel for Eve diagnostics.
RLogChannel &REveLog();
diff --git a/graf3d/eve7/inc/ROOT/REveUtil.hxx b/graf3d/eve7/inc/ROOT/REveUtil.hxx
index 85f4c5aada0aa..60c21caf795f3 100644
--- a/graf3d/eve7/inc/ROOT/REveUtil.hxx
+++ b/graf3d/eve7/inc/ROOT/REveUtil.hxx
@@ -14,12 +14,11 @@
#include "REveTypes.hxx"
-#include "TError.h"
-
+#include
#include